Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
mcg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses. You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  * Redistribution and use in source and binary forms, with or
12  * without modification, are permitted provided that the following
13  * conditions are met:
14  *
15  * - Redistributions of source code must retain the above
16  * copyright notice, this list of conditions and the following
17  * disclaimer.
18  *
19  * - Redistributions in binary form must reproduce the above
20  * copyright notice, this list of conditions and the following
21  * disclaimer in the documentation and/or other materials
22  * provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/string.h>
35 #include <linux/etherdevice.h>
36 
37 #include <linux/mlx4/cmd.h>
38 #include <linux/export.h>
39 
40 #include "mlx4.h"
41 
42 #define MGM_QPN_MASK 0x00FFFFFF
43 #define MGM_BLCK_LB_BIT 30
44 
45 static const u8 zero_gid[16]; /* automatically initialized to 0 */
46 
47 struct mlx4_mgm {
51  u8 gid[16];
53 };
54 
56 {
57  if (dev->caps.steering_mode ==
59  return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
60  else
61  return min((1 << mlx4_log_num_mgm_entry_size),
63 }
64 
66 {
67  return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
68 }
69 
/*
 * Issue the QP_FLOW_STEERING_ATTACH firmware command for the flow rule
 * already serialized into @mailbox; @size is in dwords (callers pass
 * byte-size >> 2).  On success the 64-bit immediate the firmware returns
 * is the rule's registration handle, reported through @reg_id and used
 * later to detach the rule.
 * NOTE(review): the tail of the mlx4_cmd_imm() argument list (opcode and
 * timeout lines) is missing from this extract.
 */
70 static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
71  struct mlx4_cmd_mailbox *mailbox,
72  u32 size,
73  u64 *reg_id)
74 {
75  u64 imm;
76  int err = 0;
77 
78  err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
81  if (err)
82  return err;
83  *reg_id = imm; /* immediate out-param carries the registration id */
84 
85  return err;
86 }
87 
/*
 * Tear down a flow steering rule previously installed by
 * mlx4_QP_FLOW_STEERING_ATTACH(); @regid is the handle the attach
 * command returned.
 * NOTE(review): the tail of the mlx4_cmd() argument list is missing
 * from this extract.
 */
88 static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
89 {
90  int err = 0;
91 
92  err = mlx4_cmd(dev, regid, 0, 0,
95 
96  return err;
97 }
98 
/* Read the MGM/AMGM table entry at @index from the device into @mailbox. */
99 static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
100  struct mlx4_cmd_mailbox *mailbox)
101 {
102  return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
104 }
105 
/* Write the MGM/AMGM entry held in @mailbox back to the device at @index. */
106 static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
107  struct mlx4_cmd_mailbox *mailbox)
108 {
109  return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
111 }
112 
/*
 * Program the default (promiscuous) steering entry for @port/@steer from
 * the MGM image in @mailbox.  The input modifier packs the port starting
 * at bit 16 and the steering type shifted left by one in the low bits.
 */
113 static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
114  struct mlx4_cmd_mailbox *mailbox)
115 {
116  u32 in_mod;
117 
118  in_mod = (u32) port << 16 | steer << 1;
119  return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
122 }
123 
/*
 * Ask the firmware to hash the GID stored in @mailbox; the result is
 * returned through @hash.  @op_mod selects the hashing variant (see the
 * caller in find_entry()).
 */
124 static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
125  u16 *hash, u8 op_mod)
126 {
127  u64 imm;
128  int err;
129 
130  err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
133 
134  if (!err)
135  *hash = imm; /* low 16 bits of the immediate hold the hash */
136 
137  return err;
138 }
139 
140 static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
141  enum mlx4_steer_type steer,
142  u32 qpn)
143 {
144  struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
145  struct mlx4_promisc_qp *pqp;
146 
147  list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
148  if (pqp->qpn == qpn)
149  return pqp;
150  }
151  /* not found */
152  return NULL;
153 }
154 
155 /*
156  * Add new entry to steering data structure.
157  * All promisc QPs should be added as well
158  */
159 static int new_steering_entry(struct mlx4_dev *dev, u8 port,
160  enum mlx4_steer_type steer,
161  unsigned int index, u32 qpn)
162 {
163  struct mlx4_steer *s_steer;
164  struct mlx4_cmd_mailbox *mailbox;
165  struct mlx4_mgm *mgm;
/* NOTE(review): the declaration of 'members_count' (used below) is missing
 * from this extract. */
167  struct mlx4_steer_index *new_entry;
168  struct mlx4_promisc_qp *pqp;
169  struct mlx4_promisc_qp *dqp = NULL;
170  u32 prot;
171  int err;
172 
173  s_steer = &mlx4_priv(dev)->steer[port - 1];
174  new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
175  if (!new_entry)
176  return -ENOMEM;
177 
178  INIT_LIST_HEAD(&new_entry->duplicates);
179  new_entry->index = index;
180  list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
181 
182  /* If the given qpn is also a promisc qp,
183  * it should be inserted to duplicates list
184  */
185  pqp = get_promisc_qp(dev, port, steer, qpn);
186  if (pqp) {
187  dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
188  if (!dqp) {
189  err = -ENOMEM;
190  goto out_alloc;
191  }
192  dqp->qpn = qpn;
193  list_add_tail(&dqp->list, &new_entry->duplicates);
194  }
195 
196  /* if no promisc qps for this vep, we are done */
197  if (list_empty(&s_steer->promisc_qps[steer]))
198  return 0;
199 
200  /* now need to add all the promisc qps to the new
201  * steering entry, as they should also receive the packets
202  * destined to this address */
203  mailbox = mlx4_alloc_cmd_mailbox(dev);
204  if (IS_ERR(mailbox)) {
205  err = -ENOMEM;
206  goto out_alloc;
207  }
208  mgm = mailbox->buf;
209 
210  err = mlx4_READ_ENTRY(dev, index, mailbox);
211  if (err)
212  goto out_mailbox;
213 
  /* members_count word: low 24 bits = count, top 2 bits = protocol */
214  members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
215  prot = be32_to_cpu(mgm->members_count) >> 30;
216  list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
217  /* don't add already existing qpn */
218  if (pqp->qpn == qpn)
219  continue;
220  if (members_count == dev->caps.num_qp_per_mgm) {
221  /* out of space */
222  err = -ENOMEM;
223  goto out_mailbox;
224  }
225 
226  /* add the qpn */
227  mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
228  }
229  /* update the qps count and update the entry with all the promisc qps*/
230  mgm->members_count = cpu_to_be32(members_count | (prot << 30));
231  err = mlx4_WRITE_ENTRY(dev, index, mailbox);
232 
233 out_mailbox:
234  mlx4_free_cmd_mailbox(dev, mailbox);
235  if (!err)
236  return 0;
237 out_alloc:
  /* unwind: drop the duplicate record (if any) and the steering entry */
238  if (dqp) {
239  list_del(&dqp->list);
240  kfree(dqp);
241  }
242  list_del(&new_entry->list);
243  kfree(new_entry);
244  return err;
245 }
246 
247 /* update the data structures with existing steering entry */
248 static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
249  enum mlx4_steer_type steer,
250  unsigned int index, u32 qpn)
251 {
252  struct mlx4_steer *s_steer;
253  struct mlx4_steer_index *tmp_entry, *entry = NULL;
254  struct mlx4_promisc_qp *pqp;
255  struct mlx4_promisc_qp *dqp;
256 
257  s_steer = &mlx4_priv(dev)->steer[port - 1];
258 
259  pqp = get_promisc_qp(dev, port, steer, qpn);
260  if (!pqp)
261  return 0; /* nothing to do */
262 
263  list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
264  if (tmp_entry->index == index) {
265  entry = tmp_entry;
266  break;
267  }
268  }
269  if (unlikely(!entry)) {
270  mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
271  return -EINVAL;
272  }
273 
274  /* the given qpn is listed as a promisc qpn
275  * we need to add it as a duplicate to this entry
276  * for future references */
277  list_for_each_entry(dqp, &entry->duplicates, list) {
278  if (qpn == pqp->qpn)
279  return 0; /* qp is already duplicated */
280  }
281 
282  /* add the qp as a duplicate on this index */
283  dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
284  if (!dqp)
285  return -ENOMEM;
286  dqp->qpn = qpn;
287  list_add_tail(&dqp->list, &entry->duplicates);
288 
289  return 0;
290 }
291 
292 /* Check whether a qpn is a duplicate on steering entry
293  * If so, it should not be removed from mgm */
294 static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
295  enum mlx4_steer_type steer,
296  unsigned int index, u32 qpn)
297 {
298  struct mlx4_steer *s_steer;
299  struct mlx4_steer_index *tmp_entry, *entry = NULL;
300  struct mlx4_promisc_qp *dqp, *tmp_dqp;
301 
302  s_steer = &mlx4_priv(dev)->steer[port - 1];
303 
304  /* if qp is not promisc, it cannot be duplicated */
305  if (!get_promisc_qp(dev, port, steer, qpn))
306  return false;
307 
308  /* The qp is promisc qp so it is a duplicate on this index
309  * Find the index entry, and remove the duplicate */
310  list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
311  if (tmp_entry->index == index) {
312  entry = tmp_entry;
313  break;
314  }
315  }
316  if (unlikely(!entry)) {
317  mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
318  return false;
319  }
320  list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
321  if (dqp->qpn == qpn) {
322  list_del(&dqp->list);
323  kfree(dqp);
324  }
325  }
326  return true;
327 }
328 
329 /* If a steering entry contains only promisc QPs, it can be removed. */
330 static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
331  enum mlx4_steer_type steer,
332  unsigned int index, u32 tqpn)
333 {
334  struct mlx4_steer *s_steer;
335  struct mlx4_cmd_mailbox *mailbox;
336  struct mlx4_mgm *mgm;
337  struct mlx4_steer_index *entry = NULL, *tmp_entry;
338  u32 qpn;
339  u32 members_count;
340  bool ret = false;
341  int i;
342 
343  s_steer = &mlx4_priv(dev)->steer[port - 1];
344 
345  mailbox = mlx4_alloc_cmd_mailbox(dev);
346  if (IS_ERR(mailbox))
347  return false;
348  mgm = mailbox->buf;
349 
350  if (mlx4_READ_ENTRY(dev, index, mailbox))
351  goto out;
352  members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
353  for (i = 0; i < members_count; i++) {
354  qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
355  if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
356  /* the qp is not promisc, the entry can't be removed */
357  goto out;
358  }
359  }
360  /* All the qps currently registered for this entry are promiscuous,
361  * Checking for duplicates */
362  ret = true;
363  list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
364  if (entry->index == index) {
365  if (list_empty(&entry->duplicates)) {
366  list_del(&entry->list);
367  kfree(entry);
368  } else {
369  /* This entry contains duplicates so it shouldn't be removed */
370  ret = false;
371  goto out;
372  }
373  }
374  }
375 
376 out:
377  mlx4_free_cmd_mailbox(dev, mailbox);
378  return ret;
379 }
380 
/*
 * Register @qpn as a promiscuous QP for @port/@steer: add it to every
 * existing steering entry (recording it as a duplicate where it is
 * already a member), then rewrite the default promiscuous entry to
 * contain every promiscuous QP.  Serialized by priv->mcg_table.mutex.
 * Returns 0 on success or a negative errno; on failure all software
 * state is rolled back.
 */
381 static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
382  enum mlx4_steer_type steer, u32 qpn)
383 {
384  struct mlx4_steer *s_steer;
385  struct mlx4_cmd_mailbox *mailbox;
386  struct mlx4_mgm *mgm;
387  struct mlx4_steer_index *entry;
388  struct mlx4_promisc_qp *pqp;
389  struct mlx4_promisc_qp *dqp;
390  u32 members_count;
391  u32 prot;
392  int i;
393  bool found;
394  int err;
395  struct mlx4_priv *priv = mlx4_priv(dev);
396 
397  s_steer = &mlx4_priv(dev)->steer[port - 1];
398 
399  mutex_lock(&priv->mcg_table.mutex);
400 
401  if (get_promisc_qp(dev, port, steer, qpn)) {
402  err = 0; /* Nothing to do, already exists */
403  goto out_mutex;
404  }
405 
406  pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
407  if (!pqp) {
408  err = -ENOMEM;
409  goto out_mutex;
410  }
411  pqp->qpn = qpn;
412 
413  mailbox = mlx4_alloc_cmd_mailbox(dev);
414  if (IS_ERR(mailbox)) {
415  err = -ENOMEM;
416  goto out_alloc;
417  }
418  mgm = mailbox->buf;
419 
420  /* the promisc qp needs to be added for each one of the steering
421  * entries, if it already exists, needs to be added as a duplicate
422  * for this entry */
423  list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
424  err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
425  if (err)
426  goto out_mailbox;
427 
  /* members_count word: low 24 bits = count, top 2 bits = protocol */
428  members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
429  prot = be32_to_cpu(mgm->members_count) >> 30;
430  found = false;
431  for (i = 0; i < members_count; i++) {
432  if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
433  /* Entry already exists, add to duplicates */
434  dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
435  if (!dqp) {
436  err = -ENOMEM;
437  goto out_mailbox;
438  }
439  dqp->qpn = qpn;
440  list_add_tail(&dqp->list, &entry->duplicates);
441  found = true;
442  }
443  }
444  if (!found) {
445  /* Need to add the qpn to mgm */
446  if (members_count == dev->caps.num_qp_per_mgm) {
447  /* entry is full */
448  err = -ENOMEM;
449  goto out_mailbox;
450  }
451  mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
452  mgm->members_count = cpu_to_be32(members_count | (prot << 30));
453  err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
454  if (err)
455  goto out_mailbox;
456  }
457  }
458 
459  /* add the new qpn to list of promisc qps */
460  list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
461  /* now need to add all the promisc qps to default entry */
462  memset(mgm, 0, sizeof *mgm);
463  members_count = 0;
464  list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
465  mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
466  mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
467 
468  err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
469  if (err)
470  goto out_list;
471 
472  mlx4_free_cmd_mailbox(dev, mailbox);
473  mutex_unlock(&priv->mcg_table.mutex);
474  return 0;
475 
  /* error unwind: undo list insertion, free mailbox and pqp in reverse
   * order of acquisition */
476 out_list:
477  list_del(&pqp->list);
478 out_mailbox:
479  mlx4_free_cmd_mailbox(dev, mailbox);
480 out_alloc:
481  kfree(pqp);
482 out_mutex:
483  mutex_unlock(&priv->mcg_table.mutex);
484  return err;
485 }
486 
/*
 * Undo add_promisc_qp(): drop @qpn from the default promiscuous entry
 * and from every steering entry (or only from an entry's duplicates list
 * when the QP is also an ordinary member there).  Serialized by
 * priv->mcg_table.mutex.  If the default-entry rewrite cannot even be
 * attempted, the QP is restored to the promiscuous list.
 */
487 static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
488  enum mlx4_steer_type steer, u32 qpn)
489 {
490  struct mlx4_priv *priv = mlx4_priv(dev);
491  struct mlx4_steer *s_steer;
492  struct mlx4_cmd_mailbox *mailbox;
493  struct mlx4_mgm *mgm;
494  struct mlx4_steer_index *entry;
495  struct mlx4_promisc_qp *pqp;
496  struct mlx4_promisc_qp *dqp;
497  u32 members_count;
498  bool found;
499  bool back_to_list = false;
500  int loc, i;
501  int err;
502 
503  s_steer = &mlx4_priv(dev)->steer[port - 1];
504  mutex_lock(&priv->mcg_table.mutex);
505 
506  pqp = get_promisc_qp(dev, port, steer, qpn);
507  if (unlikely(!pqp)) {
508  mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
509  /* nothing to do */
510  err = 0;
511  goto out_mutex;
512  }
513 
514  /*remove from list of promisc qps */
515  list_del(&pqp->list);
516 
517  /* set the default entry not to include the removed one */
518  mailbox = mlx4_alloc_cmd_mailbox(dev);
519  if (IS_ERR(mailbox)) {
520  err = -ENOMEM;
521  back_to_list = true;
522  goto out_list;
523  }
524  mgm = mailbox->buf;
525  memset(mgm, 0, sizeof *mgm);
526  members_count = 0;
527  list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
528  mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
529  mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
530 
531  err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
532  if (err)
533  goto out_mailbox;
534 
535  /* remove the qp from all the steering entries*/
536  list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
537  found = false;
538  list_for_each_entry(dqp, &entry->duplicates, list) {
539  if (dqp->qpn == qpn) {
540  found = true;
541  break;
542  }
543  }
544  if (found) {
545  /* a duplicate, no need to change the mgm,
546  * only update the duplicates list */
547  list_del(&dqp->list);
548  kfree(dqp);
549  } else {
550  err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
551  if (err)
552  goto out_mailbox;
553  members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
554  for (loc = -1, i = 0; i < members_count; ++i)
555  if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
556  loc = i;
557 
  /* NOTE(review): if qpn were somehow absent, loc would stay -1 and
   * mgm->qp[loc] below would index out of bounds; this relies on the
   * invariant that a promiscuous QP is a member of every entry it is
   * not duplicated on — verify. */
558  mgm->members_count = cpu_to_be32(--members_count |
559  (MLX4_PROT_ETH << 30));
  /* move the last member into the vacated slot (i == old count here) */
560  mgm->qp[loc] = mgm->qp[i - 1];
561  mgm->qp[i - 1] = 0;
562 
563  err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
564  if (err)
565  goto out_mailbox;
566  }
567 
568  }
569 
570 out_mailbox:
571  mlx4_free_cmd_mailbox(dev, mailbox);
572 out_list:
573  if (back_to_list)
574  list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
575  else
576  kfree(pqp);
577 out_mutex:
578  mutex_unlock(&priv->mcg_table.mutex);
579  return err;
580 }
581 
582 /*
583  * Caller must hold MCG table semaphore. gid and mgm parameters must
584  * be properly aligned for command interface.
585  *
586  * Returns 0 unless a firmware command error occurs.
587  *
588  * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
589  * and *mgm holds MGM entry.
590  *
591  * If GID is found in AMGM, *index = index in AMGM, *prev = index of
592  * previous entry in hash chain and *mgm holds AMGM entry.
593  *
594  * If no AMGM exists for given gid, *index = -1, *prev = index of last
595  * entry in hash chain and *mgm holds end of hash chain.
596  */
597 static int find_entry(struct mlx4_dev *dev, u8 port,
598  u8 *gid, enum mlx4_protocol prot,
599  struct mlx4_cmd_mailbox *mgm_mailbox,
600  int *prev, int *index)
601 {
602  struct mlx4_cmd_mailbox *mailbox;
603  struct mlx4_mgm *mgm = mgm_mailbox->buf;
604  u8 *mgid;
605  int err;
606  u16 hash;
607  u8 op_mod = (prot == MLX4_PROT_ETH) ?
608  !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;
609 
610  mailbox = mlx4_alloc_cmd_mailbox(dev);
611  if (IS_ERR(mailbox))
612  return -ENOMEM;
613  mgid = mailbox->buf;
614 
615  memcpy(mgid, gid, 16);
616 
617  err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
618  mlx4_free_cmd_mailbox(dev, mailbox);
619  if (err)
620  return err;
621 
622  if (0)
623  mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);
624 
625  *index = hash;
626  *prev = -1;
627 
628  do {
629  err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
630  if (err)
631  return err;
632 
633  if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
634  if (*index != hash) {
635  mlx4_err(dev, "Found zero MGID in AMGM.\n");
636  err = -EINVAL;
637  }
638  return err;
639  }
640 
641  if (!memcmp(mgm->gid, gid, 16) &&
642  be32_to_cpu(mgm->members_count) >> 30 == prot)
643  return err;
644 
645  *prev = *index;
646  *index = be32_to_cpu(mgm->next_gid_index) >> 6;
647  } while (*index);
648 
649  *index = -1;
650  return err;
651 }
652 
/*
 * Serialize the software flow-rule control block @ctrl into its hardware
 * layout (written through 'hw').
 * NOTE(review): the second parameter line and part of the
 * __promisc_mode[] table are missing from this extract.
 */
653 static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
655 {
656  static const u8 __promisc_mode[] = {
657  [MLX4_FS_PROMISC_NONE] = 0x0,
658  [MLX4_FS_PROMISC_UPLINK] = 0x1,
661  };
662 
663  u32 dw = 0;
664 
  /* ctrl dword: bit 0 = LIFO queue mode, bit 2 = exclusive,
   * bit 3 = allow loopback, promisc mode from bit 8, priority from bit 16 */
665  dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
666  dw |= ctrl->exclusive ? (1 << 2) : 0;
667  dw |= ctrl->allow_loopback ? (1 << 3) : 0;
668  dw |= __promisc_mode[ctrl->promisc_mode] << 8;
669  dw |= ctrl->priority << 16;
670 
671  hw->ctrl = cpu_to_be32(dw);
672  hw->vf_vep_port = cpu_to_be32(ctrl->port);
673  hw->qpn = cpu_to_be32(ctrl->qpn);
674 }
675 
/* Map software flow-rule spec ids to the hardware rule-type ids expected
 * by the firmware (consumed by parse_trans_rule()). */
676 const u16 __sw_id_hw[] = {
677  [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001,
678  [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005,
679  [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
680  [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
681  [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
682  [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
683 };
684 
/*
 * Translate one software rule spec into its hardware representation at
 * @rule_hw.  Returns the number of bytes consumed (the hardware block
 * size for spec->id) or a negative errno on an invalid/unsupported spec.
 * NOTE(review): the switch-case labels and the __rule_hw_sz[] index
 * labels are missing from this extract; the bodies below correspond to
 * the ETH, IB, IPV6 (unsupported), IPV4 and TCP/UDP rule ids.
 */
685 static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
686  struct _rule_hw *rule_hw)
687 {
688  static const size_t __rule_hw_sz[] = {
690  sizeof(struct mlx4_net_trans_rule_hw_eth),
692  sizeof(struct mlx4_net_trans_rule_hw_ib),
695  sizeof(struct mlx4_net_trans_rule_hw_ipv4),
697  sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
699  sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
700  };
701  if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
702  mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
703  return -EINVAL;
704  }
705  memset(rule_hw, 0, __rule_hw_sz[spec->id]);
706  rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
  /* hardware expresses the block size in 4-byte units */
707  rule_hw->size = __rule_hw_sz[spec->id] >> 2;
708 
709  switch (spec->id) {
711  memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
712  memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
713  ETH_ALEN);
714  memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
715  memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
716  ETH_ALEN);
717  if (spec->eth.ether_type_enable) {
718  rule_hw->eth.ether_type_enable = 1;
719  rule_hw->eth.ether_type = spec->eth.ether_type;
720  }
721  rule_hw->eth.vlan_id = spec->eth.vlan_id;
722  rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
723  break;
724 
726  rule_hw->ib.qpn = spec->ib.r_qpn;
727  rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
728  memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
729  memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
730  break;
731 
  /* IPv6 flow rules are not supported by this driver version */
733  return -EOPNOTSUPP;
734 
736  rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
737  rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
738  rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
739  rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
740  break;
741 
744  rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
745  rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
746  rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
747  rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
748  break;
749 
750  default:
751  return -EINVAL;
752  }
753 
754  return __rule_hw_sz[spec->id];
755 }
756 
/*
 * Log a human-readable dump of @rule, prefixed by the message @str.
 * Builds the text in a fixed 256-byte buffer and reports afterwards if
 * it was truncated.
 * NOTE(review): several switch-case labels are missing from this
 * extract; the bodies below correspond to the ETH, IPV4, TCP/UDP and IB
 * spec ids.  Also note snprintf() returns the would-be length, so once
 * 'len' exceeds BUF_SIZE the 'BUF_SIZE - len' size argument goes
 * negative — relies on the truncation check at the end; verify.
 */
757 static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
758  struct mlx4_net_trans_rule *rule)
759 {
760 #define BUF_SIZE 256
761  struct mlx4_spec_list *cur;
762  char buf[BUF_SIZE];
763  int len = 0;
764 
765  mlx4_err(dev, "%s", str);
766  len += snprintf(buf + len, BUF_SIZE - len,
767  "port = %d prio = 0x%x qp = 0x%x ",
768  rule->port, rule->priority, rule->qpn);
769 
770  list_for_each_entry(cur, &rule->list, list) {
771  switch (cur->id) {
773  len += snprintf(buf + len, BUF_SIZE - len,
774  "dmac = %pM ", &cur->eth.dst_mac);
775  if (cur->eth.ether_type)
776  len += snprintf(buf + len, BUF_SIZE - len,
777  "ethertype = 0x%x ",
778  be16_to_cpu(cur->eth.ether_type));
779  if (cur->eth.vlan_id)
780  len += snprintf(buf + len, BUF_SIZE - len,
781  "vlan-id = %d ",
782  be16_to_cpu(cur->eth.vlan_id));
783  break;
784 
786  if (cur->ipv4.src_ip)
787  len += snprintf(buf + len, BUF_SIZE - len,
788  "src-ip = %pI4 ",
789  &cur->ipv4.src_ip);
790  if (cur->ipv4.dst_ip)
791  len += snprintf(buf + len, BUF_SIZE - len,
792  "dst-ip = %pI4 ",
793  &cur->ipv4.dst_ip);
794  break;
795 
798  if (cur->tcp_udp.src_port)
799  len += snprintf(buf + len, BUF_SIZE - len,
800  "src-port = %d ",
801  be16_to_cpu(cur->tcp_udp.src_port));
802  if (cur->tcp_udp.dst_port)
803  len += snprintf(buf + len, BUF_SIZE - len,
804  "dst-port = %d ",
805  be16_to_cpu(cur->tcp_udp.dst_port));
806  break;
807 
809  len += snprintf(buf + len, BUF_SIZE - len,
810  "dst-gid = %pI6\n", cur->ib.dst_gid);
811  len += snprintf(buf + len, BUF_SIZE - len,
812  "dst-gid-mask = %pI6\n",
813  cur->ib.dst_gid_msk);
814  break;
815 
817  break;
818 
819  default:
820  break;
821  }
822  }
823  len += snprintf(buf + len, BUF_SIZE - len, "\n");
824  mlx4_err(dev, "%s", buf);
825 
826  if (len >= BUF_SIZE)
827  mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
828 }
829 
830 int mlx4_flow_attach(struct mlx4_dev *dev,
831  struct mlx4_net_trans_rule *rule, u64 *reg_id)
832 {
833  struct mlx4_cmd_mailbox *mailbox;
834  struct mlx4_spec_list *cur;
835  u32 size = 0;
836  int ret;
837 
838  mailbox = mlx4_alloc_cmd_mailbox(dev);
839  if (IS_ERR(mailbox))
840  return PTR_ERR(mailbox);
841 
842  memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
843  trans_rule_ctrl_to_hw(rule, mailbox->buf);
844 
845  size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
846 
847  list_for_each_entry(cur, &rule->list, list) {
848  ret = parse_trans_rule(dev, cur, mailbox->buf + size);
849  if (ret < 0) {
850  mlx4_free_cmd_mailbox(dev, mailbox);
851  return -EINVAL;
852  }
853  size += ret;
854  }
855 
856  ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
857  if (ret == -ENOMEM)
858  mlx4_err_rule(dev,
859  "mcg table is full. Fail to register network rule.\n",
860  rule);
861  else if (ret)
862  mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
863 
864  mlx4_free_cmd_mailbox(dev, mailbox);
865 
866  return ret;
867 }
869 
870 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
871 {
872  int err;
873 
874  err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
875  if (err)
876  mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n",
877  reg_id);
878  return err;
879 }
881 
/*
 * Attach @qp to the multicast group @gid using the MGM/AMGM tables: find
 * (or allocate) the entry for the GID, append the QP — optionally with
 * loopback blocking — and, for a freshly allocated AMGM entry, link it
 * into the hash chain.  For Ethernet, the promiscuous steering records
 * are kept in sync afterwards.  Serialized by priv->mcg_table.mutex.
 * NOTE(review): the declaration of 'members_count' is missing from this
 * extract.
 */
882 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
883  int block_mcast_loopback, enum mlx4_protocol prot,
884  enum mlx4_steer_type steer)
885 {
886  struct mlx4_priv *priv = mlx4_priv(dev);
887  struct mlx4_cmd_mailbox *mailbox;
888  struct mlx4_mgm *mgm;
890  int index, prev;
891  int link = 0;
892  int i;
893  int err;
894  u8 port = gid[5];
895  u8 new_entry = 0;
896 
897  mailbox = mlx4_alloc_cmd_mailbox(dev);
898  if (IS_ERR(mailbox))
899  return PTR_ERR(mailbox);
900  mgm = mailbox->buf;
901 
902  mutex_lock(&priv->mcg_table.mutex);
903  err = find_entry(dev, port, gid, prot,
904  mailbox, &prev, &index);
905  if (err)
906  goto out;
907 
908  if (index != -1) {
  /* empty bucket: claim it for this GID */
909  if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
910  new_entry = 1;
911  memcpy(mgm->gid, gid, 16);
912  }
913  } else {
  /* GID absent: allocate a fresh AMGM entry and link it later */
914  link = 1;
915 
916  index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
917  if (index == -1) {
918  mlx4_err(dev, "No AMGM entries left\n");
919  err = -ENOMEM;
920  goto out;
921  }
  /* AMGM entries live above the MGM hash table */
922  index += dev->caps.num_mgms;
923 
924  new_entry = 1;
925  memset(mgm, 0, sizeof *mgm);
926  memcpy(mgm->gid, gid, 16);
927  }
928 
929  members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
930  if (members_count == dev->caps.num_qp_per_mgm) {
931  mlx4_err(dev, "MGM at index %x is full.\n", index);
932  err = -ENOMEM;
933  goto out;
934  }
935 
936  for (i = 0; i < members_count; ++i)
937  if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
938  mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
939  err = 0;
940  goto out;
941  }
942 
943  if (block_mcast_loopback)
944  mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
945  (1U << MGM_BLCK_LB_BIT));
946  else
947  mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
948 
949  mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
950 
951  err = mlx4_WRITE_ENTRY(dev, index, mailbox);
952  if (err)
953  goto out;
954 
955  if (!link)
956  goto out;
957 
  /* chain the new AMGM entry behind its predecessor */
958  err = mlx4_READ_ENTRY(dev, prev, mailbox);
959  if (err)
960  goto out;
961 
962  mgm->next_gid_index = cpu_to_be32(index << 6);
963 
964  err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
965  if (err)
966  goto out;
967 
968 out:
969  if (prot == MLX4_PROT_ETH) {
970  /* manage the steering entry for promisc mode */
971  if (new_entry)
972  new_steering_entry(dev, port, steer, index, qp->qpn);
973  else
974  existing_steering_entry(dev, port, steer,
975  index, qp->qpn);
976  }
  /* on failure, return a freshly allocated AMGM slot to the bitmap */
977  if (err && link && index != -1) {
978  if (index < dev->caps.num_mgms)
979  mlx4_warn(dev, "Got AMGM index %d < %d",
980  index, dev->caps.num_mgms);
981  else
982  mlx4_bitmap_free(&priv->mcg_table.bitmap,
983  index - dev->caps.num_mgms);
984  }
985  mutex_unlock(&priv->mcg_table.mutex);
986 
987  mlx4_free_cmd_mailbox(dev, mailbox);
988  return err;
989 }
990 
/*
 * Detach @qp from the multicast group @gid: remove it from the MGM/AMGM
 * entry (unless it is only a promiscuous duplicate there) and, when the
 * entry becomes removable, unlink and free it — rewriting hash-chain
 * pointers and returning AMGM slots to the bitmap as needed.  Serialized
 * by priv->mcg_table.mutex.
 * NOTE(review): the declaration of 'members_count' is missing from this
 * extract.
 */
991 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
992  enum mlx4_protocol prot, enum mlx4_steer_type steer)
993 {
994  struct mlx4_priv *priv = mlx4_priv(dev);
995  struct mlx4_cmd_mailbox *mailbox;
996  struct mlx4_mgm *mgm;
998  int prev, index;
999  int i, loc;
1000  int err;
1001  u8 port = gid[5];
1002  bool removed_entry = false;
1003 
1004  mailbox = mlx4_alloc_cmd_mailbox(dev);
1005  if (IS_ERR(mailbox))
1006  return PTR_ERR(mailbox);
1007  mgm = mailbox->buf;
1008 
1009  mutex_lock(&priv->mcg_table.mutex);
1010 
1011  err = find_entry(dev, port, gid, prot,
1012  mailbox, &prev, &index);
1013  if (err)
1014  goto out;
1015 
1016  if (index == -1) {
1017  mlx4_err(dev, "MGID %pI6 not found\n", gid);
1018  err = -EINVAL;
1019  goto out;
1020  }
1021 
1022  /* if this pq is also a promisc qp, it shouldn't be removed */
1023  if (prot == MLX4_PROT_ETH &&
1024  check_duplicate_entry(dev, port, steer, index, qp->qpn))
1025  goto out;
1026 
1027  members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
1028  for (loc = -1, i = 0; i < members_count; ++i)
1029  if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
1030  loc = i;
1031 
1032  if (loc == -1) {
1033  mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
1034  err = -EINVAL;
1035  goto out;
1036  }
1037 
1038 
  /* move the last member into the vacated slot (i == old count here) */
1039  mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
1040  mgm->qp[loc] = mgm->qp[i - 1];
1041  mgm->qp[i - 1] = 0;
1042 
1043  if (prot == MLX4_PROT_ETH)
1044  removed_entry = can_remove_steering_entry(dev, port, steer,
1045  index, qp->qpn);
  /* entry still has members (or must stay for steering): just rewrite it */
1046  if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
1047  err = mlx4_WRITE_ENTRY(dev, index, mailbox);
1048  goto out;
1049  }
1050 
1051  /* We are going to delete the entry, members count should be 0 */
1052  mgm->members_count = cpu_to_be32((u32) prot << 30);
1053 
1054  if (prev == -1) {
1055  /* Remove entry from MGM */
1056  int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
1057  if (amgm_index) {
  /* copy the chained AMGM entry into the MGM bucket */
1058  err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
1059  if (err)
1060  goto out;
1061  } else
1062  memset(mgm->gid, 0, 16);
1063 
1064  err = mlx4_WRITE_ENTRY(dev, index, mailbox);
1065  if (err)
1066  goto out;
1067 
1068  if (amgm_index) {
1069  if (amgm_index < dev->caps.num_mgms)
1070  mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
1071  index, amgm_index, dev->caps.num_mgms);
1072  else
1073  mlx4_bitmap_free(&priv->mcg_table.bitmap,
1074  amgm_index - dev->caps.num_mgms);
1075  }
1076  } else {
1077  /* Remove entry from AMGM */
1078  int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
1079  err = mlx4_READ_ENTRY(dev, prev, mailbox);
1080  if (err)
1081  goto out;
1082 
  /* splice this entry out of the hash chain */
1083  mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
1084 
1085  err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
1086  if (err)
1087  goto out;
1088 
1089  if (index < dev->caps.num_mgms)
1090  mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
1091  prev, index, dev->caps.num_mgms);
1092  else
1093  mlx4_bitmap_free(&priv->mcg_table.bitmap,
1094  index - dev->caps.num_mgms);
1095  }
1096 
1097 out:
1098  mutex_unlock(&priv->mcg_table.mutex);
1099 
1100  mlx4_free_cmd_mailbox(dev, mailbox);
1101  return err;
1102 }
1103 
/*
 * Multi-function (SR-IOV) attach/detach path: send the GID and QP via the
 * wrapped QP_ATTACH command so the PF can arbitrate.  The protocol is
 * packed into the input modifier starting at bit 28; bit 31 requests
 * loopback blocking on attach.
 * NOTE(review): the mlx4_cmd() opcode/timeout argument lines are missing
 * from this extract.  Also, '1 << 31' overflows signed int — '1U << 31'
 * would be cleaner.
 */
1104 static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
1105  u8 gid[16], u8 attach, u8 block_loopback,
1106  enum mlx4_protocol prot)
1107 {
1108  struct mlx4_cmd_mailbox *mailbox;
1109  int err = 0;
1110  int qpn;
1111 
  /* this path is only meaningful on multi-function devices */
1112  if (!mlx4_is_mfunc(dev))
1113  return -EBADF;
1114 
1115  mailbox = mlx4_alloc_cmd_mailbox(dev);
1116  if (IS_ERR(mailbox))
1117  return PTR_ERR(mailbox);
1118 
1119  memcpy(mailbox->buf, gid, 16);
1120  qpn = qp->qpn;
1121  qpn |= (prot << 28);
1122  if (attach && block_loopback)
1123  qpn |= (1 << 31);
1124 
1125  err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
1128 
1129  mlx4_free_cmd_mailbox(dev, mailbox);
1130  return err;
1131 }
1132 
/*
 * Public multicast attach entry point: dispatch on the device steering
 * mode.  A0: Ethernet attaches are a no-op (other protocols fall through
 * to the B0 path).  B0: tag the GID with the MC steering type and use the
 * MGM path (wrapped command when multi-function).  Device-managed: build
 * a flow-steering rule and register it, reporting the handle via @reg_id.
 * NOTE(review): the device-managed case label and parts of the rule
 * initializer/spec case labels are missing from this extract.
 */
1133 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1134  u8 port, int block_mcast_loopback,
1135  enum mlx4_protocol prot, u64 *reg_id)
1136 {
1137 
1138  switch (dev->caps.steering_mode) {
1139  case MLX4_STEERING_MODE_A0:
1140  if (prot == MLX4_PROT_ETH)
1141  return 0;
1142 
  /* fall through: non-Ethernet A0 attaches use the B0/MGM path */
1143  case MLX4_STEERING_MODE_B0:
1144  if (prot == MLX4_PROT_ETH)
1145  gid[7] |= (MLX4_MC_STEER << 1);
1146 
1147  if (mlx4_is_mfunc(dev))
1148  return mlx4_QP_ATTACH(dev, qp, gid, 1,
1149  block_mcast_loopback, prot);
1150  return mlx4_qp_attach_common(dev, qp, gid,
1151  block_mcast_loopback, prot,
1152  MLX4_MC_STEER);
1153 
1155  struct mlx4_spec_list spec = { {NULL} };
1156  __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
1157 
1158  struct mlx4_net_trans_rule rule = {
1160  .exclusive = 0,
1161  .promisc_mode = MLX4_FS_PROMISC_NONE,
1162  .priority = MLX4_DOMAIN_NIC,
1163  };
1164 
  /* NOTE(review): '~' is bitwise, so ~1 == -2 (still truthy); this only
   * yields the intended 0/1 if allow_loopback is a 1-bit field keeping
   * just bit 0 — '!block_mcast_loopback' would state the intent; verify. */
1165  rule.allow_loopback = ~block_mcast_loopback;
1166  rule.port = port;
1167  rule.qpn = qp->qpn;
1168  INIT_LIST_HEAD(&rule.list);
1169 
1170  switch (prot) {
1171  case MLX4_PROT_ETH:
  /* multicast MAC lives in the low 6 bytes of the GID */
1173  memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
1174  memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
1175  break;
1176 
1177  case MLX4_PROT_IB_IPV6:
1179  memcpy(spec.ib.dst_gid, gid, 16);
1180  memset(&spec.ib.dst_gid_msk, 0xff, 16);
1181  break;
1182  default:
1183  return -EINVAL;
1184  }
1185  list_add_tail(&spec.list, &rule.list);
1186 
1187  return mlx4_flow_attach(dev, &rule, reg_id);
1188  }
1189 
1190  default:
1191  return -EINVAL;
1192  }
1193 }
1195 
1196 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1197  enum mlx4_protocol prot, u64 reg_id)
1198 {
1199  switch (dev->caps.steering_mode) {
1200  case MLX4_STEERING_MODE_A0:
1201  if (prot == MLX4_PROT_ETH)
1202  return 0;
1203 
1204  case MLX4_STEERING_MODE_B0:
1205  if (prot == MLX4_PROT_ETH)
1206  gid[7] |= (MLX4_MC_STEER << 1);
1207 
1208  if (mlx4_is_mfunc(dev))
1209  return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
1210 
1211  return mlx4_qp_detach_common(dev, qp, gid, prot,
1212  MLX4_MC_STEER);
1213 
1215  return mlx4_flow_detach(dev, reg_id);
1216 
1217  default:
1218  return -EINVAL;
1219  }
1220 }
1222 
1225 {
1226  struct mlx4_net_trans_rule rule;
1227  u64 *regid_p;
1228 
1229  switch (mode) {
1232  regid_p = &dev->regid_promisc_array[port];
1233  break;
1235  regid_p = &dev->regid_allmulti_array[port];
1236  break;
1237  default:
1238  return -1;
1239  }
1240 
1241  if (*regid_p != 0)
1242  return -1;
1243 
1244  rule.promisc_mode = mode;
1245  rule.port = port;
1246  rule.qpn = qpn;
1247  INIT_LIST_HEAD(&rule.list);
1248  mlx4_err(dev, "going promisc on %x\n", port);
1249 
1250  return mlx4_flow_attach(dev, &rule, regid_p);
1251 }
1253 
1256 {
1257  int ret;
1258  u64 *regid_p;
1259 
1260  switch (mode) {
1263  regid_p = &dev->regid_promisc_array[port];
1264  break;
1266  regid_p = &dev->regid_allmulti_array[port];
1267  break;
1268  default:
1269  return -1;
1270  }
1271 
1272  if (*regid_p == 0)
1273  return -1;
1274 
1275  ret = mlx4_flow_detach(dev, *regid_p);
1276  if (ret == 0)
1277  *regid_p = 0;
1278 
1279  return ret;
1280 }
1282 
1284  struct mlx4_qp *qp, u8 gid[16],
1285  int block_mcast_loopback, enum mlx4_protocol prot)
1286 {
1287  if (prot == MLX4_PROT_ETH)
1288  gid[7] |= (MLX4_UC_STEER << 1);
1289 
1290  if (mlx4_is_mfunc(dev))
1291  return mlx4_QP_ATTACH(dev, qp, gid, 1,
1292  block_mcast_loopback, prot);
1293 
1294  return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
1295  prot, MLX4_UC_STEER);
1296 }
1298 
1299 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
1300  u8 gid[16], enum mlx4_protocol prot)
1301 {
1302  if (prot == MLX4_PROT_ETH)
1303  gid[7] |= (MLX4_UC_STEER << 1);
1304 
1305  if (mlx4_is_mfunc(dev))
1306  return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
1307 
1308  return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
1309 }
1311 
1312 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
1313  struct mlx4_vhcr *vhcr,
1314  struct mlx4_cmd_mailbox *inbox,
1315  struct mlx4_cmd_mailbox *outbox,
1316  struct mlx4_cmd_info *cmd)
1317 {
1318  u32 qpn = (u32) vhcr->in_param & 0xffffffff;
1319  u8 port = vhcr->in_param >> 62;
1320  enum mlx4_steer_type steer = vhcr->in_modifier;
1321 
1322  /* Promiscuous unicast is not allowed in mfunc */
1323  if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
1324  return 0;
1325 
1326  if (vhcr->op_modifier)
1327  return add_promisc_qp(dev, port, steer, qpn);
1328  else
1329  return remove_promisc_qp(dev, port, steer, qpn);
1330 }
1331 
1332 static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
1333  enum mlx4_steer_type steer, u8 add, u8 port)
1334 {
1335  return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
1338 }
1339 
1340 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
1341 {
1342  if (mlx4_is_mfunc(dev))
1343  return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
1344 
1345  return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
1346 }
1348 
1349 int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
1350 {
1351  if (mlx4_is_mfunc(dev))
1352  return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
1353 
1354  return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
1355 }
1357 
1358 int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
1359 {
1360  if (mlx4_is_mfunc(dev))
1361  return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
1362 
1363  return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
1364 }
1366 
1367 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
1368 {
1369  if (mlx4_is_mfunc(dev))
1370  return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
1371 
1372  return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
1373 }
1375 
1377 {
1378  struct mlx4_priv *priv = mlx4_priv(dev);
1379  int err;
1380 
1381  /* No need for mcg_table when fw managed the mcg table*/
1382  if (dev->caps.steering_mode ==
1384  return 0;
1385  err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
1386  dev->caps.num_amgms - 1, 0, 0);
1387  if (err)
1388  return err;
1389 
1390  mutex_init(&priv->mcg_table.mutex);
1391 
1392  return 0;
1393 }
1394 
1396 {
1397  if (dev->caps.steering_mode !=
1399  mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
1400 }