Linux Kernel 3.7.1
resource_tracker.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses. You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or
14  * without modification, are permitted provided that the following
15  * conditions are met:
16  *
17  * - Redistributions of source code must retain the above
18  * copyright notice, this list of conditions and the following
19  * disclaimer.
20  *
21  * - Redistributions in binary form must reproduce the above
22  * copyright notice, this list of conditions and the following
23  * disclaimer in the documentation and/or other materials
24  * provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46 
47 #include "mlx4.h"
48 #include "fw.h"
49 
50 #define MLX4_MAC_VALID (1ull << 63)
51 
52 struct mac_res {
53  struct list_head list;
54  u64 mac;
55  u8 port;
56 };
57 
58 struct res_common {
59  struct list_head list;
60  struct rb_node node;
61  u64 res_id;
62  int owner;
63  int state;
64  int from_state;
65  int to_state;
66  int removing;
67 };
68 
69 enum {
70  RES_ANY_BUSY = 1,
71 };
72 
73 struct res_gid {
74  struct list_head list;
75  u8 gid[16];
76  enum mlx4_protocol prot;
77  enum mlx4_steer_type steer;
78 };
79 
80 enum res_qp_states {
81  RES_QP_BUSY = RES_ANY_BUSY,
82 
83  /* QP number was allocated */
84  RES_QP_RESERVED,
85 
86  /* ICM memory for QP context was mapped */
87  RES_QP_MAPPED,
88 
89  /* QP is in hw ownership */
90  RES_QP_HW
91 };
92 
93 struct res_qp {
94  struct res_common com;
95  struct res_mtt *mtt;
96  struct res_cq *rcq;
97  struct res_cq *scq;
98  struct res_srq *srq;
99  struct list_head mcg_list;
100  spinlock_t mcg_spl;
101  int local_qpn;
102 };
103 
104 enum res_mtt_states {
105  RES_MTT_BUSY = RES_ANY_BUSY,
106  RES_MTT_ALLOCATED,
107 };
108 
109 static inline const char *mtt_states_str(enum res_mtt_states state)
110 {
111  switch (state) {
112  case RES_MTT_BUSY: return "RES_MTT_BUSY";
113  case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
114  default: return "Unknown";
115  }
116 }
117 
118 struct res_mtt {
119  struct res_common com;
120  int order;
121  atomic_t ref_count;
122 };
123 
124 enum res_mpt_states {
125  RES_MPT_BUSY = RES_ANY_BUSY,
126  RES_MPT_RESERVED,
127  RES_MPT_MAPPED,
128  RES_MPT_HW,
129 };
130 
131 struct res_mpt {
132  struct res_common com;
133  struct res_mtt *mtt;
134  int key;
135 };
136 
137 enum res_eq_states {
138  RES_EQ_BUSY = RES_ANY_BUSY,
139  RES_EQ_RESERVED,
140  RES_EQ_HW,
141 };
142 
143 struct res_eq {
144  struct res_common com;
145  struct res_mtt *mtt;
146 };
147 
148 enum res_cq_states {
149  RES_CQ_BUSY = RES_ANY_BUSY,
150  RES_CQ_ALLOCATED,
151  RES_CQ_HW,
152 };
153 
154 struct res_cq {
155  struct res_common com;
156  struct res_mtt *mtt;
157  atomic_t ref_count;
158 };
159 
160 enum res_srq_states {
161  RES_SRQ_BUSY = RES_ANY_BUSY,
162  RES_SRQ_ALLOCATED,
163  RES_SRQ_HW,
164 };
165 
166 struct res_srq {
167  struct res_common com;
168  struct res_mtt *mtt;
169  struct res_cq *cq;
170  atomic_t ref_count;
171 };
172 
173 enum res_counter_states {
174  RES_COUNTER_BUSY = RES_ANY_BUSY,
175  RES_COUNTER_ALLOCATED,
176 };
177 
178 struct res_counter {
179  struct res_common com;
180  int port;
181 };
182 
182 
183 enum res_xrcdn_states {
184  RES_XRCD_BUSY = RES_ANY_BUSY,
185  RES_XRCD_ALLOCATED,
186 };
187 
188 struct res_xrcdn {
189  struct res_common com;
190  int port;
191 };
192 
193 enum res_fs_rule_states {
194  RES_FS_RULE_BUSY = RES_ANY_BUSY,
195  RES_FS_RULE_ALLOCATED,
196 };
197 
198 struct res_fs_rule {
199  struct res_common com;
200 };
201 
202 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
203 {
204  struct rb_node *node = root->rb_node;
205 
206  while (node) {
207  struct res_common *res = container_of(node, struct res_common,
208  node);
209 
210  if (res_id < res->res_id)
211  node = node->rb_left;
212  else if (res_id > res->res_id)
213  node = node->rb_right;
214  else
215  return res;
216  }
217  return NULL;
218 }
219 
220 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
221 {
222  struct rb_node **new = &(root->rb_node), *parent = NULL;
223 
224  /* Figure out where to put new node */
225  while (*new) {
226  struct res_common *this = container_of(*new, struct res_common,
227  node);
228 
229  parent = *new;
230  if (res->res_id < this->res_id)
231  new = &((*new)->rb_left);
232  else if (res->res_id > this->res_id)
233  new = &((*new)->rb_right);
234  else
235  return -EEXIST;
236  }
237 
238  /* Add new node and rebalance tree. */
239  rb_link_node(&res->node, parent, new);
240  rb_insert_color(&res->node, root);
241 
242  return 0;
243 }
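/*
 * Note: the two helpers above implement a plain rb-tree keyed by
 * res_common.res_id.  Each resource type gets its own tree
 * (res_tracker.res_tree[type]), so the same numeric id can be tracked
 * for, say, a QP and a CQ without colliding.  A minimal usage sketch
 * with names from this file (error handling elided):
 *
 *	struct res_common *r = alloc_tr(id, RES_CQ, slave, 0);
 *	if (res_tracker_insert(&tracker->res_tree[RES_CQ], r))
 *		;	/* -EEXIST: this id is already tracked */
 *	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], id);
 */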
244 
245 enum qp_transition {
246  QP_TRANS_INIT2RTR,
247  QP_TRANS_RTR2RTS,
248  QP_TRANS_RTS2RTS,
249  QP_TRANS_SQERR2RTS,
250  QP_TRANS_SQD2SQD,
251  QP_TRANS_SQD2RTS
252 };
253 
254 /* For Debug uses */
255 static const char *ResourceType(enum mlx4_resource rt)
256 {
257  switch (rt) {
258  case RES_QP: return "RES_QP";
259  case RES_CQ: return "RES_CQ";
260  case RES_SRQ: return "RES_SRQ";
261  case RES_MPT: return "RES_MPT";
262  case RES_MTT: return "RES_MTT";
263  case RES_MAC: return "RES_MAC";
264  case RES_EQ: return "RES_EQ";
265  case RES_COUNTER: return "RES_COUNTER";
266  case RES_FS_RULE: return "RES_FS_RULE";
267  case RES_XRCD: return "RES_XRCD";
268  default: return "Unknown resource type !!!";
269  };
270 }
271 
272 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
273 {
274  struct mlx4_priv *priv = mlx4_priv(dev);
275  int i;
276  int t;
277 
278  priv->mfunc.master.res_tracker.slave_list =
279  kzalloc(dev->num_slaves * sizeof(struct slave_list),
280  GFP_KERNEL);
281  if (!priv->mfunc.master.res_tracker.slave_list)
282  return -ENOMEM;
283 
284  for (i = 0 ; i < dev->num_slaves; i++) {
285  for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
286  INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
287  slave_list[i].res_list[t]);
288  mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
289  }
290 
291  mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
292  dev->num_slaves);
293  for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
294  priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
295 
296  spin_lock_init(&priv->mfunc.master.res_tracker.lock);
297  return 0 ;
298 }
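/*
 * The tracker keeps two views of every resource: a per-slave list
 * (slave_list[slave].res_list[type], guarded by that slave's mutex),
 * used when tearing down everything a slave owns, and one rb-tree per
 * resource type (res_tree[type], guarded by the tracker spinlock),
 * used for id lookup on the command path.
 */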
299 
300 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
301  enum mlx4_res_tracker_free_type type)
302 {
303  struct mlx4_priv *priv = mlx4_priv(dev);
304  int i;
305 
306  if (priv->mfunc.master.res_tracker.slave_list) {
307  if (type != RES_TR_FREE_STRUCTS_ONLY)
308  for (i = 0 ; i < dev->num_slaves; i++)
309  if (type == RES_TR_FREE_ALL ||
310  dev->caps.function != i)
311  mlx4_delete_all_resources_for_slave(dev, i);
312 
313  if (type != RES_TR_FREE_SLAVES_ONLY) {
314  kfree(priv->mfunc.master.res_tracker.slave_list);
315  priv->mfunc.master.res_tracker.slave_list = NULL;
316  }
317  }
318 }
319 
320 static void update_pkey_index(struct mlx4_dev *dev, int slave,
321  struct mlx4_cmd_mailbox *inbox)
322 {
323  u8 sched = *(u8 *)(inbox->buf + 64);
324  u8 orig_index = *(u8 *)(inbox->buf + 35);
325  u8 new_index;
326  struct mlx4_priv *priv = mlx4_priv(dev);
327  int port;
328 
329  port = (sched >> 6 & 1) + 1;
330 
331  new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
332  *(u8 *)(inbox->buf + 35) = new_index;
333 }
334 
335 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
336  u8 slave)
337 {
338  struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
339  enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
340  u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
341 
342  if (MLX4_QP_ST_UD == ts)
343  qp_ctx->pri_path.mgid_index = 0x80 | slave;
344 
345  if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
346  if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
347  qp_ctx->pri_path.mgid_index = slave & 0x7F;
348  if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
349  qp_ctx->alt_path.mgid_index = slave & 0x7F;
350  }
351 }
352 
353 static int mpt_mask(struct mlx4_dev *dev)
354 {
355  return dev->caps.num_mpts - 1;
356 }
357 
358 static void *find_res(struct mlx4_dev *dev, int res_id,
359  enum mlx4_resource type)
360 {
361  struct mlx4_priv *priv = mlx4_priv(dev);
362 
363  return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
364  res_id);
365 }
366 
367 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
368  enum mlx4_resource type,
369  void *res)
370 {
371  struct res_common *r;
372  int err = 0;
373 
374  spin_lock_irq(mlx4_tlock(dev));
375  r = find_res(dev, res_id, type);
376  if (!r) {
377  err = -ENONET;
378  goto exit;
379  }
380 
381  if (r->state == RES_ANY_BUSY) {
382  err = -EBUSY;
383  goto exit;
384  }
385 
386  if (r->owner != slave) {
387  err = -EPERM;
388  goto exit;
389  }
390 
391  r->from_state = r->state;
392  r->state = RES_ANY_BUSY;
393 
394  if (res)
395  *((struct res_common **)res) = r;
396 
397 exit:
398  spin_unlock_irq(mlx4_tlock(dev));
399  return err;
400 }
401 
402 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
403  enum mlx4_resource type,
404  u64 res_id, int *slave)
405 {
406 
407  struct res_common *r;
408  int err = -ENOENT;
409  int id = res_id;
410 
411  if (type == RES_QP)
412  id &= 0x7fffff;
413  spin_lock(mlx4_tlock(dev));
414 
415  r = find_res(dev, id, type);
416  if (r) {
417  *slave = r->owner;
418  err = 0;
419  }
420  spin_unlock(mlx4_tlock(dev));
421 
422  return err;
423 }
424 
425 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
426  enum mlx4_resource type)
427 {
428  struct res_common *r;
429 
430  spin_lock_irq(mlx4_tlock(dev));
431  r = find_res(dev, res_id, type);
432  if (r)
433  r->state = r->from_state;
434  spin_unlock_irq(mlx4_tlock(dev));
435 }
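/*
 * get_res()/put_res() form a simple busy-marking protocol: get_res()
 * checks ownership, saves the current state in from_state and parks
 * the entry in RES_ANY_BUSY so no concurrent command can move it;
 * put_res() restores from_state.  Typical pattern used by the command
 * wrappers below (sketch, error handling elided):
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	...use mpt while it is pinned busy...
 *	put_res(dev, slave, id, RES_MPT);
 */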
436 
437 static struct res_common *alloc_qp_tr(int id)
438 {
439  struct res_qp *ret;
440 
441  ret = kzalloc(sizeof *ret, GFP_KERNEL);
442  if (!ret)
443  return NULL;
444 
445  ret->com.res_id = id;
446  ret->com.state = RES_QP_RESERVED;
447  ret->local_qpn = id;
448  INIT_LIST_HEAD(&ret->mcg_list);
449  spin_lock_init(&ret->mcg_spl);
450 
451  return &ret->com;
452 }
453 
454 static struct res_common *alloc_mtt_tr(int id, int order)
455 {
456  struct res_mtt *ret;
457 
458  ret = kzalloc(sizeof *ret, GFP_KERNEL);
459  if (!ret)
460  return NULL;
461 
462  ret->com.res_id = id;
463  ret->order = order;
464  ret->com.state = RES_MTT_ALLOCATED;
465  atomic_set(&ret->ref_count, 0);
466 
467  return &ret->com;
468 }
469 
470 static struct res_common *alloc_mpt_tr(int id, int key)
471 {
472  struct res_mpt *ret;
473 
474  ret = kzalloc(sizeof *ret, GFP_KERNEL);
475  if (!ret)
476  return NULL;
477 
478  ret->com.res_id = id;
479  ret->com.state = RES_MPT_RESERVED;
480  ret->key = key;
481 
482  return &ret->com;
483 }
484 
485 static struct res_common *alloc_eq_tr(int id)
486 {
487  struct res_eq *ret;
488 
489  ret = kzalloc(sizeof *ret, GFP_KERNEL);
490  if (!ret)
491  return NULL;
492 
493  ret->com.res_id = id;
494  ret->com.state = RES_EQ_RESERVED;
495 
496  return &ret->com;
497 }
498 
499 static struct res_common *alloc_cq_tr(int id)
500 {
501  struct res_cq *ret;
502 
503  ret = kzalloc(sizeof *ret, GFP_KERNEL);
504  if (!ret)
505  return NULL;
506 
507  ret->com.res_id = id;
508  ret->com.state = RES_CQ_ALLOCATED;
509  atomic_set(&ret->ref_count, 0);
510 
511  return &ret->com;
512 }
513 
514 static struct res_common *alloc_srq_tr(int id)
515 {
516  struct res_srq *ret;
517 
518  ret = kzalloc(sizeof *ret, GFP_KERNEL);
519  if (!ret)
520  return NULL;
521 
522  ret->com.res_id = id;
523  ret->com.state = RES_SRQ_ALLOCATED;
524  atomic_set(&ret->ref_count, 0);
525 
526  return &ret->com;
527 }
528 
529 static struct res_common *alloc_counter_tr(int id)
530 {
531  struct res_counter *ret;
532 
533  ret = kzalloc(sizeof *ret, GFP_KERNEL);
534  if (!ret)
535  return NULL;
536 
537  ret->com.res_id = id;
538  ret->com.state = RES_COUNTER_ALLOCATED;
539 
540  return &ret->com;
541 }
542 
543 static struct res_common *alloc_xrcdn_tr(int id)
544 {
545  struct res_xrcdn *ret;
546 
547  ret = kzalloc(sizeof *ret, GFP_KERNEL);
548  if (!ret)
549  return NULL;
550 
551  ret->com.res_id = id;
552  ret->com.state = RES_XRCD_ALLOCATED;
553 
554  return &ret->com;
555 }
556 
557 static struct res_common *alloc_fs_rule_tr(u64 id)
558 {
559  struct res_fs_rule *ret;
560 
561  ret = kzalloc(sizeof *ret, GFP_KERNEL);
562  if (!ret)
563  return NULL;
564 
565  ret->com.res_id = id;
566  ret->com.state = RES_FS_RULE_ALLOCATED;
567 
568  return &ret->com;
569 }
570 
571 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
572  int extra)
573 {
574  struct res_common *ret;
575 
576  switch (type) {
577  case RES_QP:
578  ret = alloc_qp_tr(id);
579  break;
580  case RES_MPT:
581  ret = alloc_mpt_tr(id, extra);
582  break;
583  case RES_MTT:
584  ret = alloc_mtt_tr(id, extra);
585  break;
586  case RES_EQ:
587  ret = alloc_eq_tr(id);
588  break;
589  case RES_CQ:
590  ret = alloc_cq_tr(id);
591  break;
592  case RES_SRQ:
593  ret = alloc_srq_tr(id);
594  break;
595  case RES_MAC:
596  printk(KERN_ERR "implementation missing\n");
597  return NULL;
598  case RES_COUNTER:
599  ret = alloc_counter_tr(id);
600  break;
601  case RES_XRCD:
602  ret = alloc_xrcdn_tr(id);
603  break;
604  case RES_FS_RULE:
605  ret = alloc_fs_rule_tr(id);
606  break;
607  default:
608  return NULL;
609  }
610  if (ret)
611  ret->owner = slave;
612 
613  return ret;
614 }
615 
616 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
617  enum mlx4_resource type, int extra)
618 {
619  int i;
620  int err;
621  struct mlx4_priv *priv = mlx4_priv(dev);
622  struct res_common **res_arr;
623  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
624  struct rb_root *root = &tracker->res_tree[type];
625 
626  res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
627  if (!res_arr)
628  return -ENOMEM;
629 
630  for (i = 0; i < count; ++i) {
631  res_arr[i] = alloc_tr(base + i, type, slave, extra);
632  if (!res_arr[i]) {
633  for (--i; i >= 0; --i)
634  kfree(res_arr[i]);
635 
636  kfree(res_arr);
637  return -ENOMEM;
638  }
639  }
640 
641  spin_lock_irq(mlx4_tlock(dev));
642  for (i = 0; i < count; ++i) {
643  if (find_res(dev, base + i, type)) {
644  err = -EEXIST;
645  goto undo;
646  }
647  err = res_tracker_insert(root, res_arr[i]);
648  if (err)
649  goto undo;
650  list_add_tail(&res_arr[i]->list,
651  &tracker->slave_list[slave].res_list[type]);
652  }
653  spin_unlock_irq(mlx4_tlock(dev));
654  kfree(res_arr);
655 
656  return 0;
657 
658 undo:
659  for (--i; i >= base; --i)
660  rb_erase(&res_arr[i]->node, root);
661 
662  spin_unlock_irq(mlx4_tlock(dev));
663 
664  for (i = 0; i < count; ++i)
665  kfree(res_arr[i]);
666 
667  kfree(res_arr);
668 
669  return err;
670 }
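/*
 * add_res_range() registers the id range [base, base + count) of a
 * given type to a slave: it allocates one tracking entry per id,
 * inserts each into the global rb-tree and appends it to the slave's
 * list, undoing everything if an id is already present.  The
 * allocation wrappers below call it right after the corresponding
 * __mlx4_*_alloc/reserve helper succeeds.
 */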
671 
672 static int remove_qp_ok(struct res_qp *res)
673 {
674  if (res->com.state == RES_QP_BUSY)
675  return -EBUSY;
676  else if (res->com.state != RES_QP_RESERVED)
677  return -EPERM;
678 
679  return 0;
680 }
681 
682 static int remove_mtt_ok(struct res_mtt *res, int order)
683 {
684  if (res->com.state == RES_MTT_BUSY ||
685  atomic_read(&res->ref_count)) {
686  printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
687  __func__, __LINE__,
688  mtt_states_str(res->com.state),
689  atomic_read(&res->ref_count));
690  return -EBUSY;
691  } else if (res->com.state != RES_MTT_ALLOCATED)
692  return -EPERM;
693  else if (res->order != order)
694  return -EINVAL;
695 
696  return 0;
697 }
698 
699 static int remove_mpt_ok(struct res_mpt *res)
700 {
701  if (res->com.state == RES_MPT_BUSY)
702  return -EBUSY;
703  else if (res->com.state != RES_MPT_RESERVED)
704  return -EPERM;
705 
706  return 0;
707 }
708 
709 static int remove_eq_ok(struct res_eq *res)
710 {
711  if (res->com.state == RES_MPT_BUSY)
712  return -EBUSY;
713  else if (res->com.state != RES_MPT_RESERVED)
714  return -EPERM;
715 
716  return 0;
717 }
718 
719 static int remove_counter_ok(struct res_counter *res)
720 {
721  if (res->com.state == RES_COUNTER_BUSY)
722  return -EBUSY;
723  else if (res->com.state != RES_COUNTER_ALLOCATED)
724  return -EPERM;
725 
726  return 0;
727 }
728 
729 static int remove_xrcdn_ok(struct res_xrcdn *res)
730 {
731  if (res->com.state == RES_XRCD_BUSY)
732  return -EBUSY;
733  else if (res->com.state != RES_XRCD_ALLOCATED)
734  return -EPERM;
735 
736  return 0;
737 }
738 
739 static int remove_fs_rule_ok(struct res_fs_rule *res)
740 {
741  if (res->com.state == RES_FS_RULE_BUSY)
742  return -EBUSY;
743  else if (res->com.state != RES_FS_RULE_ALLOCATED)
744  return -EPERM;
745 
746  return 0;
747 }
748 
749 static int remove_cq_ok(struct res_cq *res)
750 {
751  if (res->com.state == RES_CQ_BUSY)
752  return -EBUSY;
753  else if (res->com.state != RES_CQ_ALLOCATED)
754  return -EPERM;
755 
756  return 0;
757 }
758 
759 static int remove_srq_ok(struct res_srq *res)
760 {
761  if (res->com.state == RES_SRQ_BUSY)
762  return -EBUSY;
763  else if (res->com.state != RES_SRQ_ALLOCATED)
764  return -EPERM;
765 
766  return 0;
767 }
768 
769 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
770 {
771  switch (type) {
772  case RES_QP:
773  return remove_qp_ok((struct res_qp *)res);
774  case RES_CQ:
775  return remove_cq_ok((struct res_cq *)res);
776  case RES_SRQ:
777  return remove_srq_ok((struct res_srq *)res);
778  case RES_MPT:
779  return remove_mpt_ok((struct res_mpt *)res);
780  case RES_MTT:
781  return remove_mtt_ok((struct res_mtt *)res, extra);
782  case RES_MAC:
783  return -ENOSYS;
784  case RES_EQ:
785  return remove_eq_ok((struct res_eq *)res);
786  case RES_COUNTER:
787  return remove_counter_ok((struct res_counter *)res);
788  case RES_XRCD:
789  return remove_xrcdn_ok((struct res_xrcdn *)res);
790  case RES_FS_RULE:
791  return remove_fs_rule_ok((struct res_fs_rule *)res);
792  default:
793  return -EINVAL;
794  }
795 }
796 
797 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
798  enum mlx4_resource type, int extra)
799 {
800  u64 i;
801  int err;
802  struct mlx4_priv *priv = mlx4_priv(dev);
803  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
804  struct res_common *r;
805 
806  spin_lock_irq(mlx4_tlock(dev));
807  for (i = base; i < base + count; ++i) {
808  r = res_tracker_lookup(&tracker->res_tree[type], i);
809  if (!r) {
810  err = -ENOENT;
811  goto out;
812  }
813  if (r->owner != slave) {
814  err = -EPERM;
815  goto out;
816  }
817  err = remove_ok(r, type, extra);
818  if (err)
819  goto out;
820  }
821 
822  for (i = base; i < base + count; ++i) {
823  r = res_tracker_lookup(&tracker->res_tree[type], i);
824  rb_erase(&r->node, &tracker->res_tree[type]);
825  list_del(&r->list);
826  kfree(r);
827  }
828  err = 0;
829 
830 out:
831  spin_unlock_irq(mlx4_tlock(dev));
832 
833  return err;
834 }
835 
836 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
837  enum res_qp_states state, struct res_qp **qp,
838  int alloc)
839 {
840  struct mlx4_priv *priv = mlx4_priv(dev);
841  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
842  struct res_qp *r;
843  int err = 0;
844 
845  spin_lock_irq(mlx4_tlock(dev));
846  r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
847  if (!r)
848  err = -ENOENT;
849  else if (r->com.owner != slave)
850  err = -EPERM;
851  else {
852  switch (state) {
853  case RES_QP_BUSY:
854  mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
855  __func__, r->com.res_id);
856  err = -EBUSY;
857  break;
858 
859  case RES_QP_RESERVED:
860  if (r->com.state == RES_QP_MAPPED && !alloc)
861  break;
862 
863  mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
864  err = -EINVAL;
865  break;
866 
867  case RES_QP_MAPPED:
868  if ((r->com.state == RES_QP_RESERVED && alloc) ||
869  r->com.state == RES_QP_HW)
870  break;
871  else {
872  mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
873  r->com.res_id);
874  err = -EINVAL;
875  }
876 
877  break;
878 
879  case RES_QP_HW:
880  if (r->com.state != RES_QP_MAPPED)
881  err = -EINVAL;
882  break;
883  default:
884  err = -EINVAL;
885  }
886 
887  if (!err) {
888  r->com.from_state = r->com.state;
889  r->com.to_state = state;
890  r->com.state = RES_QP_BUSY;
891  if (qp)
892  *qp = r;
893  }
894  }
895 
896  spin_unlock_irq(mlx4_tlock(dev));
897 
898  return err;
899 }
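/*
 * The *_res_start_move_to() helpers implement a two-phase state
 * change: the entry is validated against the requested transition,
 * the transition is recorded in from_state/to_state, and the entry is
 * parked in the BUSY state.  The caller then issues the firmware
 * command and finishes with res_end_move() on success or
 * res_abort_move() on failure.  Sketch of the pattern used by the
 * wrappers below (error handling elided):
 *
 *	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
 *	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 *	if (err)
 *		res_abort_move(dev, slave, RES_QP, qpn);
 *	else
 *		res_end_move(dev, slave, RES_QP, qpn);
 */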
900 
901 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
902  enum res_mpt_states state, struct res_mpt **mpt)
903 {
904  struct mlx4_priv *priv = mlx4_priv(dev);
905  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
906  struct res_mpt *r;
907  int err = 0;
908 
909  spin_lock_irq(mlx4_tlock(dev));
910  r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
911  if (!r)
912  err = -ENOENT;
913  else if (r->com.owner != slave)
914  err = -EPERM;
915  else {
916  switch (state) {
917  case RES_MPT_BUSY:
918  err = -EINVAL;
919  break;
920 
921  case RES_MPT_RESERVED:
922  if (r->com.state != RES_MPT_MAPPED)
923  err = -EINVAL;
924  break;
925 
926  case RES_MPT_MAPPED:
927  if (r->com.state != RES_MPT_RESERVED &&
928  r->com.state != RES_MPT_HW)
929  err = -EINVAL;
930  break;
931 
932  case RES_MPT_HW:
933  if (r->com.state != RES_MPT_MAPPED)
934  err = -EINVAL;
935  break;
936  default:
937  err = -EINVAL;
938  }
939 
940  if (!err) {
941  r->com.from_state = r->com.state;
942  r->com.to_state = state;
943  r->com.state = RES_MPT_BUSY;
944  if (mpt)
945  *mpt = r;
946  }
947  }
948 
949  spin_unlock_irq(mlx4_tlock(dev));
950 
951  return err;
952 }
953 
954 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
955  enum res_eq_states state, struct res_eq **eq)
956 {
957  struct mlx4_priv *priv = mlx4_priv(dev);
958  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
959  struct res_eq *r;
960  int err = 0;
961 
962  spin_lock_irq(mlx4_tlock(dev));
963  r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
964  if (!r)
965  err = -ENOENT;
966  else if (r->com.owner != slave)
967  err = -EPERM;
968  else {
969  switch (state) {
970  case RES_EQ_BUSY:
971  err = -EINVAL;
972  break;
973 
974  case RES_EQ_RESERVED:
975  if (r->com.state != RES_EQ_HW)
976  err = -EINVAL;
977  break;
978 
979  case RES_EQ_HW:
980  if (r->com.state != RES_EQ_RESERVED)
981  err = -EINVAL;
982  break;
983 
984  default:
985  err = -EINVAL;
986  }
987 
988  if (!err) {
989  r->com.from_state = r->com.state;
990  r->com.to_state = state;
991  r->com.state = RES_EQ_BUSY;
992  if (eq)
993  *eq = r;
994  }
995  }
996 
997  spin_unlock_irq(mlx4_tlock(dev));
998 
999  return err;
1000 }
1001 
1002 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1003  enum res_cq_states state, struct res_cq **cq)
1004 {
1005  struct mlx4_priv *priv = mlx4_priv(dev);
1006  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1007  struct res_cq *r;
1008  int err;
1009 
1010  spin_lock_irq(mlx4_tlock(dev));
1011  r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1012  if (!r)
1013  err = -ENOENT;
1014  else if (r->com.owner != slave)
1015  err = -EPERM;
1016  else {
1017  switch (state) {
1018  case RES_CQ_BUSY:
1019  err = -EBUSY;
1020  break;
1021 
1022  case RES_CQ_ALLOCATED:
1023  if (r->com.state != RES_CQ_HW)
1024  err = -EINVAL;
1025  else if (atomic_read(&r->ref_count))
1026  err = -EBUSY;
1027  else
1028  err = 0;
1029  break;
1030 
1031  case RES_CQ_HW:
1032  if (r->com.state != RES_CQ_ALLOCATED)
1033  err = -EINVAL;
1034  else
1035  err = 0;
1036  break;
1037 
1038  default:
1039  err = -EINVAL;
1040  }
1041 
1042  if (!err) {
1043  r->com.from_state = r->com.state;
1044  r->com.to_state = state;
1045  r->com.state = RES_CQ_BUSY;
1046  if (cq)
1047  *cq = r;
1048  }
1049  }
1050 
1051  spin_unlock_irq(mlx4_tlock(dev));
1052 
1053  return err;
1054 }
1055 
1056 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1057  enum res_cq_states state, struct res_srq **srq)
1058 {
1059  struct mlx4_priv *priv = mlx4_priv(dev);
1060  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1061  struct res_srq *r;
1062  int err = 0;
1063 
1064  spin_lock_irq(mlx4_tlock(dev));
1065  r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1066  if (!r)
1067  err = -ENOENT;
1068  else if (r->com.owner != slave)
1069  err = -EPERM;
1070  else {
1071  switch (state) {
1072  case RES_SRQ_BUSY:
1073  err = -EINVAL;
1074  break;
1075 
1076  case RES_SRQ_ALLOCATED:
1077  if (r->com.state != RES_SRQ_HW)
1078  err = -EINVAL;
1079  else if (atomic_read(&r->ref_count))
1080  err = -EBUSY;
1081  break;
1082 
1083  case RES_SRQ_HW:
1084  if (r->com.state != RES_SRQ_ALLOCATED)
1085  err = -EINVAL;
1086  break;
1087 
1088  default:
1089  err = -EINVAL;
1090  }
1091 
1092  if (!err) {
1093  r->com.from_state = r->com.state;
1094  r->com.to_state = state;
1095  r->com.state = RES_SRQ_BUSY;
1096  if (srq)
1097  *srq = r;
1098  }
1099  }
1100 
1101  spin_unlock_irq(mlx4_tlock(dev));
1102 
1103  return err;
1104 }
1105 
1106 static void res_abort_move(struct mlx4_dev *dev, int slave,
1107  enum mlx4_resource type, int id)
1108 {
1109  struct mlx4_priv *priv = mlx4_priv(dev);
1110  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1111  struct res_common *r;
1112 
1113  spin_lock_irq(mlx4_tlock(dev));
1114  r = res_tracker_lookup(&tracker->res_tree[type], id);
1115  if (r && (r->owner == slave))
1116  r->state = r->from_state;
1117  spin_unlock_irq(mlx4_tlock(dev));
1118 }
1119 
1120 static void res_end_move(struct mlx4_dev *dev, int slave,
1121  enum mlx4_resource type, int id)
1122 {
1123  struct mlx4_priv *priv = mlx4_priv(dev);
1124  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1125  struct res_common *r;
1126 
1127  spin_lock_irq(mlx4_tlock(dev));
1128  r = res_tracker_lookup(&tracker->res_tree[type], id);
1129  if (r && (r->owner == slave))
1130  r->state = r->to_state;
1131  spin_unlock_irq(mlx4_tlock(dev));
1132 }
1133 
1134 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1135 {
1136  return mlx4_is_qp_reserved(dev, qpn) &&
1137  (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1138 }
1139 
1140 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1141 {
1142  return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1143 }
1144 
1145 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1146  u64 in_param, u64 *out_param)
1147 {
1148  int err;
1149  int count;
1150  int align;
1151  int base;
1152  int qpn;
1153 
1154  switch (op) {
1155  case RES_OP_RESERVE:
1156  count = get_param_l(&in_param);
1157  align = get_param_h(&in_param);
1158  err = __mlx4_qp_reserve_range(dev, count, align, &base);
1159  if (err)
1160  return err;
1161 
1162  err = add_res_range(dev, slave, base, count, RES_QP, 0);
1163  if (err) {
1164  __mlx4_qp_release_range(dev, base, count);
1165  return err;
1166  }
1167  set_param_l(out_param, base);
1168  break;
1169  case RES_OP_MAP_ICM:
1170  qpn = get_param_l(&in_param) & 0x7fffff;
1171  if (valid_reserved(dev, slave, qpn)) {
1172  err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1173  if (err)
1174  return err;
1175  }
1176 
1177  err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1178  NULL, 1);
1179  if (err)
1180  return err;
1181 
1182  if (!fw_reserved(dev, qpn)) {
1183  err = __mlx4_qp_alloc_icm(dev, qpn);
1184  if (err) {
1185  res_abort_move(dev, slave, RES_QP, qpn);
1186  return err;
1187  }
1188  }
1189 
1190  res_end_move(dev, slave, RES_QP, qpn);
1191  break;
1192 
1193  default:
1194  err = -EINVAL;
1195  break;
1196  }
1197  return err;
1198 }
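/*
 * QP allocation on behalf of a slave mirrors the native two-step flow:
 * RES_OP_RESERVE grabs a QPN range and tracks it in RES_QP_RESERVED,
 * while RES_OP_MAP_ICM maps the context memory for one QPN and moves
 * it to RES_QP_MAPPED.  QPNs in the firmware-reserved region
 * (fw_reserved()) skip the ICM mapping since that memory is already
 * owned by the firmware.
 */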
1199 
1200 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1201  u64 in_param, u64 *out_param)
1202 {
1203  int err = -EINVAL;
1204  int base;
1205  int order;
1206 
1207  if (op != RES_OP_RESERVE_AND_MAP)
1208  return err;
1209 
1210  order = get_param_l(&in_param);
1211  base = __mlx4_alloc_mtt_range(dev, order);
1212  if (base == -1)
1213  return -ENOMEM;
1214 
1215  err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1216  if (err)
1217  __mlx4_free_mtt_range(dev, base, order);
1218  else
1219  set_param_l(out_param, base);
1220 
1221  return err;
1222 }
1223 
1224 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1225  u64 in_param, u64 *out_param)
1226 {
1227  int err = -EINVAL;
1228  int index;
1229  int id;
1230  struct res_mpt *mpt;
1231 
1232  switch (op) {
1233  case RES_OP_RESERVE:
1234  index = __mlx4_mr_reserve(dev);
1235  if (index == -1)
1236  break;
1237  id = index & mpt_mask(dev);
1238 
1239  err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1240  if (err) {
1241  __mlx4_mr_release(dev, index);
1242  break;
1243  }
1244  set_param_l(out_param, index);
1245  break;
1246  case RES_OP_MAP_ICM:
1247  index = get_param_l(&in_param);
1248  id = index & mpt_mask(dev);
1249  err = mr_res_start_move_to(dev, slave, id,
1250  RES_MPT_MAPPED, &mpt);
1251  if (err)
1252  return err;
1253 
1254  err = __mlx4_mr_alloc_icm(dev, mpt->key);
1255  if (err) {
1256  res_abort_move(dev, slave, RES_MPT, id);
1257  return err;
1258  }
1259 
1260  res_end_move(dev, slave, RES_MPT, id);
1261  break;
1262  }
1263  return err;
1264 }
1265 
1266 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1267  u64 in_param, u64 *out_param)
1268 {
1269  int cqn;
1270  int err;
1271 
1272  switch (op) {
1273  case RES_OP_RESERVE_AND_MAP:
1274  err = __mlx4_cq_alloc_icm(dev, &cqn);
1275  if (err)
1276  break;
1277 
1278  err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1279  if (err) {
1280  __mlx4_cq_free_icm(dev, cqn);
1281  break;
1282  }
1283 
1284  set_param_l(out_param, cqn);
1285  break;
1286 
1287  default:
1288  err = -EINVAL;
1289  }
1290 
1291  return err;
1292 }
1293 
1294 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1295  u64 in_param, u64 *out_param)
1296 {
1297  int srqn;
1298  int err;
1299 
1300  switch (op) {
1301  case RES_OP_RESERVE_AND_MAP:
1302  err = __mlx4_srq_alloc_icm(dev, &srqn);
1303  if (err)
1304  break;
1305 
1306  err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1307  if (err) {
1308  __mlx4_srq_free_icm(dev, srqn);
1309  break;
1310  }
1311 
1312  set_param_l(out_param, srqn);
1313  break;
1314 
1315  default:
1316  err = -EINVAL;
1317  }
1318 
1319  return err;
1320 }
1321 
1322 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1323 {
1324  struct mlx4_priv *priv = mlx4_priv(dev);
1325  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1326  struct mac_res *res;
1327 
1328  res = kzalloc(sizeof *res, GFP_KERNEL);
1329  if (!res)
1330  return -ENOMEM;
1331  res->mac = mac;
1332  res->port = (u8) port;
1333  list_add_tail(&res->list,
1334  &tracker->slave_list[slave].res_list[RES_MAC]);
1335  return 0;
1336 }
1337 
1338 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1339  int port)
1340 {
1341  struct mlx4_priv *priv = mlx4_priv(dev);
1342  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1343  struct list_head *mac_list =
1344  &tracker->slave_list[slave].res_list[RES_MAC];
1345  struct mac_res *res, *tmp;
1346 
1347  list_for_each_entry_safe(res, tmp, mac_list, list) {
1348  if (res->mac == mac && res->port == (u8) port) {
1349  list_del(&res->list);
1350  kfree(res);
1351  break;
1352  }
1353  }
1354 }
1355 
1356 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1357 {
1358  struct mlx4_priv *priv = mlx4_priv(dev);
1359  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1360  struct list_head *mac_list =
1361  &tracker->slave_list[slave].res_list[RES_MAC];
1362  struct mac_res *res, *tmp;
1363 
1364  list_for_each_entry_safe(res, tmp, mac_list, list) {
1365  list_del(&res->list);
1366  __mlx4_unregister_mac(dev, res->port, res->mac);
1367  kfree(res);
1368  }
1369 }
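/*
 * MAC addresses are the one resource type not kept in the rb-tree
 * (alloc_tr() deliberately rejects RES_MAC above, and remove_ok()
 * returns -ENOSYS for it).  Instead each registered MAC is tracked
 * only on the owning slave's res_list[RES_MAC] via the helpers above,
 * and rem_slave_macs() unregisters whatever is left when the slave
 * goes away.
 */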
1370 
1371 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1372  u64 in_param, u64 *out_param)
1373 {
1374  int err = -EINVAL;
1375  int port;
1376  u64 mac;
1377 
1378  if (op != RES_OP_RESERVE_AND_MAP)
1379  return err;
1380 
1381  port = get_param_l(out_param);
1382  mac = in_param;
1383 
1384  err = __mlx4_register_mac(dev, port, mac);
1385  if (err >= 0) {
1386  set_param_l(out_param, err);
1387  err = 0;
1388  }
1389 
1390  if (!err) {
1391  err = mac_add_to_slave(dev, slave, mac, port);
1392  if (err)
1393  __mlx4_unregister_mac(dev, port, mac);
1394  }
1395  return err;
1396 }
1397 
1398 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1399  u64 in_param, u64 *out_param)
1400 {
1401  return 0;
1402 }
1403 
1404 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1405  u64 in_param, u64 *out_param)
1406 {
1407  u32 index;
1408  int err;
1409 
1410  if (op != RES_OP_RESERVE)
1411  return -EINVAL;
1412 
1413  err = __mlx4_counter_alloc(dev, &index);
1414  if (err)
1415  return err;
1416 
1417  err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1418  if (err)
1419  __mlx4_counter_free(dev, index);
1420  else
1421  set_param_l(out_param, index);
1422 
1423  return err;
1424 }
1425 
1426 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1427  u64 in_param, u64 *out_param)
1428 {
1429  u32 xrcdn;
1430  int err;
1431 
1432  if (op != RES_OP_RESERVE)
1433  return -EINVAL;
1434 
1435  err = __mlx4_xrcd_alloc(dev, &xrcdn);
1436  if (err)
1437  return err;
1438 
1439  err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1440  if (err)
1441  __mlx4_xrcd_free(dev, xrcdn);
1442  else
1443  set_param_l(out_param, xrcdn);
1444 
1445  return err;
1446 }
1447 
1448 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1449  struct mlx4_vhcr *vhcr,
1450  struct mlx4_cmd_mailbox *inbox,
1451  struct mlx4_cmd_mailbox *outbox,
1452  struct mlx4_cmd_info *cmd)
1453 {
1454  int err;
1455  int alop = vhcr->op_modifier;
1456 
1457  switch (vhcr->in_modifier) {
1458  case RES_QP:
1459  err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1460  vhcr->in_param, &vhcr->out_param);
1461  break;
1462 
1463  case RES_MTT:
1464  err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1465  vhcr->in_param, &vhcr->out_param);
1466  break;
1467 
1468  case RES_MPT:
1469  err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1470  vhcr->in_param, &vhcr->out_param);
1471  break;
1472 
1473  case RES_CQ:
1474  err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1475  vhcr->in_param, &vhcr->out_param);
1476  break;
1477 
1478  case RES_SRQ:
1479  err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1480  vhcr->in_param, &vhcr->out_param);
1481  break;
1482 
1483  case RES_MAC:
1484  err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1485  vhcr->in_param, &vhcr->out_param);
1486  break;
1487 
1488  case RES_VLAN:
1489  err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1490  vhcr->in_param, &vhcr->out_param);
1491  break;
1492 
1493  case RES_COUNTER:
1494  err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1495  vhcr->in_param, &vhcr->out_param);
1496  break;
1497 
1498  case RES_XRCD:
1499  err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1500  vhcr->in_param, &vhcr->out_param);
1501  break;
1502 
1503  default:
1504  err = -EINVAL;
1505  break;
1506  }
1507 
1508  return err;
1509 }
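/*
 * mlx4_ALLOC_RES_wrapper() is the paravirtual entry point for the
 * ALLOC_RES command: vhcr->in_modifier selects the resource type,
 * vhcr->op_modifier the operation (reserve, map ICM, or both), and
 * the allocated id is returned to the slave via vhcr->out_param.
 * mlx4_FREE_RES_wrapper() below is its symmetric counterpart.
 */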
1510 
1511 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1512  u64 in_param)
1513 {
1514  int err;
1515  int count;
1516  int base;
1517  int qpn;
1518 
1519  switch (op) {
1520  case RES_OP_RESERVE:
1521  base = get_param_l(&in_param) & 0x7fffff;
1522  count = get_param_h(&in_param);
1523  err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1524  if (err)
1525  break;
1526  __mlx4_qp_release_range(dev, base, count);
1527  break;
1528  case RES_OP_MAP_ICM:
1529  qpn = get_param_l(&in_param) & 0x7fffff;
1530  err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1531  NULL, 0);
1532  if (err)
1533  return err;
1534 
1535  if (!fw_reserved(dev, qpn))
1536  __mlx4_qp_free_icm(dev, qpn);
1537 
1538  res_end_move(dev, slave, RES_QP, qpn);
1539 
1540  if (valid_reserved(dev, slave, qpn))
1541  err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1542  break;
1543  default:
1544  err = -EINVAL;
1545  break;
1546  }
1547  return err;
1548 }
1549 
1550 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1551  u64 in_param, u64 *out_param)
1552 {
1553  int err = -EINVAL;
1554  int base;
1555  int order;
1556 
1557  if (op != RES_OP_RESERVE_AND_MAP)
1558  return err;
1559 
1560  base = get_param_l(&in_param);
1561  order = get_param_h(&in_param);
1562  err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1563  if (!err)
1564  __mlx4_free_mtt_range(dev, base, order);
1565  return err;
1566 }
1567 
1568 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1569  u64 in_param)
1570 {
1571  int err = -EINVAL;
1572  int index;
1573  int id;
1574  struct res_mpt *mpt;
1575 
1576  switch (op) {
1577  case RES_OP_RESERVE:
1578  index = get_param_l(&in_param);
1579  id = index & mpt_mask(dev);
1580  err = get_res(dev, slave, id, RES_MPT, &mpt);
1581  if (err)
1582  break;
1583  index = mpt->key;
1584  put_res(dev, slave, id, RES_MPT);
1585 
1586  err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1587  if (err)
1588  break;
1589  __mlx4_mr_release(dev, index);
1590  break;
1591  case RES_OP_MAP_ICM:
1592  index = get_param_l(&in_param);
1593  id = index & mpt_mask(dev);
1594  err = mr_res_start_move_to(dev, slave, id,
1595  RES_MPT_RESERVED, &mpt);
1596  if (err)
1597  return err;
1598 
1599  __mlx4_mr_free_icm(dev, mpt->key);
1600  res_end_move(dev, slave, RES_MPT, id);
1601  return err;
1602  break;
1603  default:
1604  err = -EINVAL;
1605  break;
1606  }
1607  return err;
1608 }
1609 
1610 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1611  u64 in_param, u64 *out_param)
1612 {
1613  int cqn;
1614  int err;
1615 
1616  switch (op) {
1617  case RES_OP_RESERVE_AND_MAP:
1618  cqn = get_param_l(&in_param);
1619  err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1620  if (err)
1621  break;
1622 
1623  __mlx4_cq_free_icm(dev, cqn);
1624  break;
1625 
1626  default:
1627  err = -EINVAL;
1628  break;
1629  }
1630 
1631  return err;
1632 }
1633 
1634 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1635  u64 in_param, u64 *out_param)
1636 {
1637  int srqn;
1638  int err;
1639 
1640  switch (op) {
1641  case RES_OP_RESERVE_AND_MAP:
1642  srqn = get_param_l(&in_param);
1643  err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1644  if (err)
1645  break;
1646 
1647  __mlx4_srq_free_icm(dev, srqn);
1648  break;
1649 
1650  default:
1651  err = -EINVAL;
1652  break;
1653  }
1654 
1655  return err;
1656 }
1657 
1658 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1659  u64 in_param, u64 *out_param)
1660 {
1661  int port;
1662  int err = 0;
1663 
1664  switch (op) {
1665  case RES_OP_RESERVE_AND_MAP:
1666  port = get_param_l(out_param);
1667  mac_del_from_slave(dev, slave, in_param, port);
1668  __mlx4_unregister_mac(dev, port, in_param);
1669  break;
1670  default:
1671  err = -EINVAL;
1672  break;
1673  }
1674 
1675  return err;
1676 
1677 }
1678 
1679 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1680  u64 in_param, u64 *out_param)
1681 {
1682  return 0;
1683 }
1684 
1685 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1686  u64 in_param, u64 *out_param)
1687 {
1688  int index;
1689  int err;
1690 
1691  if (op != RES_OP_RESERVE)
1692  return -EINVAL;
1693 
1694  index = get_param_l(&in_param);
1695  err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1696  if (err)
1697  return err;
1698 
1699  __mlx4_counter_free(dev, index);
1700 
1701  return err;
1702 }
1703 
1704 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1705  u64 in_param, u64 *out_param)
1706 {
1707  int xrcdn;
1708  int err;
1709 
1710  if (op != RES_OP_RESERVE)
1711  return -EINVAL;
1712 
1713  xrcdn = get_param_l(&in_param);
1714  err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1715  if (err)
1716  return err;
1717 
1718  __mlx4_xrcd_free(dev, xrcdn);
1719 
1720  return err;
1721 }
1722 
1723 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1724  struct mlx4_vhcr *vhcr,
1725  struct mlx4_cmd_mailbox *inbox,
1726  struct mlx4_cmd_mailbox *outbox,
1727  struct mlx4_cmd_info *cmd)
1728 {
1729  int err = -EINVAL;
1730  int alop = vhcr->op_modifier;
1731 
1732  switch (vhcr->in_modifier) {
1733  case RES_QP:
1734  err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1735  vhcr->in_param);
1736  break;
1737 
1738  case RES_MTT:
1739  err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1740  vhcr->in_param, &vhcr->out_param);
1741  break;
1742 
1743  case RES_MPT:
1744  err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1745  vhcr->in_param);
1746  break;
1747 
1748  case RES_CQ:
1749  err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1750  vhcr->in_param, &vhcr->out_param);
1751  break;
1752 
1753  case RES_SRQ:
1754  err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1755  vhcr->in_param, &vhcr->out_param);
1756  break;
1757 
1758  case RES_MAC:
1759  err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1760  vhcr->in_param, &vhcr->out_param);
1761  break;
1762 
1763  case RES_VLAN:
1764  err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1765  vhcr->in_param, &vhcr->out_param);
1766  break;
1767 
1768  case RES_COUNTER:
1769  err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1770  vhcr->in_param, &vhcr->out_param);
1771  break;
1772 
1773  case RES_XRCD:
1774  err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1775  vhcr->in_param, &vhcr->out_param);
1776 
1777  default:
1778  break;
1779  }
1780  return err;
1781 }
1782 
1783 /* ugly but other choices are uglier */
1784 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1785 {
1786  return (be32_to_cpu(mpt->flags) >> 9) & 1;
1787 }
1788 
1789 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1790 {
1791  return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1792 }
1793 
1794 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1795 {
1796  return be32_to_cpu(mpt->mtt_sz);
1797 }
1798 
1799 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1800 {
1801  return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1802 }
1803 
1804 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1805 {
1806  return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1807 }
1808 
1809 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1810 {
1811  int page_shift = (qpc->log_page_size & 0x3f) + 12;
1812  int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1813  int log_sq_sride = qpc->sq_size_stride & 7;
1814  int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1815  int log_rq_stride = qpc->rq_size_stride & 7;
1816  int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1817  int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1818  int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1819  int sq_size;
1820  int rq_size;
1821  int total_pages;
1822  int total_mem;
1823  int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1824 
1825  sq_size = 1 << (log_sq_size + log_sq_sride + 4);
1826  rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1827  total_mem = sq_size + rq_size;
1828  total_pages =
1829  roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1830  page_shift);
1831 
1832  return total_pages;
1833 }
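/*
 * Worked example for qp_get_mtt_size(), with hypothetical values:
 * log_sq_size = 6 and log_sq_sride = 2 give an SQ of
 * 1 << (6 + 2 + 4) = 4096 bytes; if the QP uses an SRQ the RQ
 * contributes nothing, so with page_shift = 12 and page_offset = 0 the
 * result is roundup_pow_of_two(4096 >> 12) = 1 MTT page, which is then
 * checked against the MTT range the slave registered.
 */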
1834 
1835 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1836  int size, struct res_mtt *mtt)
1837 {
1838  int res_start = mtt->com.res_id;
1839  int res_size = (1 << mtt->order);
1840 
1841  if (start < res_start || start + size > res_start + res_size)
1842  return -EPERM;
1843  return 0;
1844 }
1845 
1846 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1847  struct mlx4_vhcr *vhcr,
1848  struct mlx4_cmd_mailbox *inbox,
1849  struct mlx4_cmd_mailbox *outbox,
1850  struct mlx4_cmd_info *cmd)
1851 {
1852  int err;
1853  int index = vhcr->in_modifier;
1854  struct res_mtt *mtt;
1855  struct res_mpt *mpt;
1856  int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1857  int phys;
1858  int id;
1859 
1860  id = index & mpt_mask(dev);
1861  err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1862  if (err)
1863  return err;
1864 
1865  phys = mr_phys_mpt(inbox->buf);
1866  if (!phys) {
1867  err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1868  if (err)
1869  goto ex_abort;
1870 
1871  err = check_mtt_range(dev, slave, mtt_base,
1872  mr_get_mtt_size(inbox->buf), mtt);
1873  if (err)
1874  goto ex_put;
1875 
1876  mpt->mtt = mtt;
1877  }
1878 
1879  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1880  if (err)
1881  goto ex_put;
1882 
1883  if (!phys) {
1884  atomic_inc(&mtt->ref_count);
1885  put_res(dev, slave, mtt->com.res_id, RES_MTT);
1886  }
1887 
1888  res_end_move(dev, slave, RES_MPT, id);
1889  return 0;
1890 
1891 ex_put:
1892  if (!phys)
1893  put_res(dev, slave, mtt->com.res_id, RES_MTT);
1894 ex_abort:
1895  res_abort_move(dev, slave, RES_MPT, id);
1896 
1897  return err;
1898 }
1899 
1900 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1901  struct mlx4_vhcr *vhcr,
1902  struct mlx4_cmd_mailbox *inbox,
1903  struct mlx4_cmd_mailbox *outbox,
1904  struct mlx4_cmd_info *cmd)
1905 {
1906  int err;
1907  int index = vhcr->in_modifier;
1908  struct res_mpt *mpt;
1909  int id;
1910 
1911  id = index & mpt_mask(dev);
1912  err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1913  if (err)
1914  return err;
1915 
1916  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1917  if (err)
1918  goto ex_abort;
1919 
1920  if (mpt->mtt)
1921  atomic_dec(&mpt->mtt->ref_count);
1922 
1923  res_end_move(dev, slave, RES_MPT, id);
1924  return 0;
1925 
1926 ex_abort:
1927  res_abort_move(dev, slave, RES_MPT, id);
1928 
1929  return err;
1930 }
1931 
1932 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1933  struct mlx4_vhcr *vhcr,
1934  struct mlx4_cmd_mailbox *inbox,
1935  struct mlx4_cmd_mailbox *outbox,
1936  struct mlx4_cmd_info *cmd)
1937 {
1938  int err;
1939  int index = vhcr->in_modifier;
1940  struct res_mpt *mpt;
1941  int id;
1942 
1943  id = index & mpt_mask(dev);
1944  err = get_res(dev, slave, id, RES_MPT, &mpt);
1945  if (err)
1946  return err;
1947 
1948  if (mpt->com.from_state != RES_MPT_HW) {
1949  err = -EBUSY;
1950  goto out;
1951  }
1952 
1953  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1954 
1955 out:
1956  put_res(dev, slave, id, RES_MPT);
1957  return err;
1958 }
1959 
1960 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1961 {
1962  return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1963 }
1964 
1965 static int qp_get_scqn(struct mlx4_qp_context *qpc)
1966 {
1967  return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1968 }
1969 
1970 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1971 {
1972  return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1973 }
1974 
1975 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
1976  struct mlx4_qp_context *context)
1977 {
1978  u32 qpn = vhcr->in_modifier & 0xffffff;
1979  u32 qkey = 0;
1980 
1981  if (mlx4_get_parav_qkey(dev, qpn, &qkey))
1982  return;
1983 
1984  /* adjust qkey in qp context */
1985  context->qkey = cpu_to_be32(qkey);
1986 }
1987 
1988 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1989  struct mlx4_vhcr *vhcr,
1990  struct mlx4_cmd_mailbox *inbox,
1991  struct mlx4_cmd_mailbox *outbox,
1992  struct mlx4_cmd_info *cmd)
1993 {
1994  int err;
1995  int qpn = vhcr->in_modifier & 0x7fffff;
1996  struct res_mtt *mtt;
1997  struct res_qp *qp;
1998  struct mlx4_qp_context *qpc = inbox->buf + 8;
1999  int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2000  int mtt_size = qp_get_mtt_size(qpc);
2001  struct res_cq *rcq;
2002  struct res_cq *scq;
2003  int rcqn = qp_get_rcqn(qpc);
2004  int scqn = qp_get_scqn(qpc);
2005  u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2006  int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2007  struct res_srq *srq;
2008  int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2009 
2010  err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2011  if (err)
2012  return err;
2013  qp->local_qpn = local_qpn;
2014 
2015  err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2016  if (err)
2017  goto ex_abort;
2018 
2019  err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2020  if (err)
2021  goto ex_put_mtt;
2022 
2023  err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2024  if (err)
2025  goto ex_put_mtt;
2026 
2027  if (scqn != rcqn) {
2028  err = get_res(dev, slave, scqn, RES_CQ, &scq);
2029  if (err)
2030  goto ex_put_rcq;
2031  } else
2032  scq = rcq;
2033 
2034  if (use_srq) {
2035  err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2036  if (err)
2037  goto ex_put_scq;
2038  }
2039 
2040  adjust_proxy_tun_qkey(dev, vhcr, qpc);
2041  update_pkey_index(dev, slave, inbox);
2042  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2043  if (err)
2044  goto ex_put_srq;
2045  atomic_inc(&mtt->ref_count);
2046  qp->mtt = mtt;
2047  atomic_inc(&rcq->ref_count);
2048  qp->rcq = rcq;
2049  atomic_inc(&scq->ref_count);
2050  qp->scq = scq;
2051 
2052  if (scqn != rcqn)
2053  put_res(dev, slave, scqn, RES_CQ);
2054 
2055  if (use_srq) {
2056  atomic_inc(&srq->ref_count);
2057  put_res(dev, slave, srqn, RES_SRQ);
2058  qp->srq = srq;
2059  }
2060  put_res(dev, slave, rcqn, RES_CQ);
2061  put_res(dev, slave, mtt_base, RES_MTT);
2062  res_end_move(dev, slave, RES_QP, qpn);
2063 
2064  return 0;
2065 
2066 ex_put_srq:
2067  if (use_srq)
2068  put_res(dev, slave, srqn, RES_SRQ);
2069 ex_put_scq:
2070  if (scqn != rcqn)
2071  put_res(dev, slave, scqn, RES_CQ);
2072 ex_put_rcq:
2073  put_res(dev, slave, rcqn, RES_CQ);
2074 ex_put_mtt:
2075  put_res(dev, slave, mtt_base, RES_MTT);
2076 ex_abort:
2077  res_abort_move(dev, slave, RES_QP, qpn);
2078 
2079  return err;
2080 }
2081 
2082 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2083 {
2084  return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2085 }
2086 
2087 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2088 {
2089  int log_eq_size = eqc->log_eq_size & 0x1f;
2090  int page_shift = (eqc->log_page_size & 0x3f) + 12;
2091 
2092  if (log_eq_size + 5 < page_shift)
2093  return 1;
2094 
2095  return 1 << (log_eq_size + 5 - page_shift);
2096 }
2097 
2098 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2099 {
2100  return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2101 }
2102 
2103 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2104 {
2105  int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2106  int page_shift = (cqc->log_page_size & 0x3f) + 12;
2107 
2108  if (log_cq_size + 5 < page_shift)
2109  return 1;
2110 
2111  return 1 << (log_cq_size + 5 - page_shift);
2112 }
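/*
 * eq_get_mtt_size() and cq_get_mtt_size() both rely on 32-byte EQ/CQ
 * entries, which is where the "+ 5" comes from.  For example
 * (hypothetical values), a CQ with log_cq_size = 10 holds
 * 1024 * 32 = 32 KB of CQEs, and with page_shift = 12 that is
 * 1 << (10 + 5 - 12) = 8 MTT pages.
 */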
2113 
2114 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2115  struct mlx4_vhcr *vhcr,
2116  struct mlx4_cmd_mailbox *inbox,
2117  struct mlx4_cmd_mailbox *outbox,
2118  struct mlx4_cmd_info *cmd)
2119 {
2120  int err;
2121  int eqn = vhcr->in_modifier;
2122  int res_id = (slave << 8) | eqn;
2123  struct mlx4_eq_context *eqc = inbox->buf;
2124  int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2125  int mtt_size = eq_get_mtt_size(eqc);
2126  struct res_eq *eq;
2127  struct res_mtt *mtt;
2128 
2129  err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2130  if (err)
2131  return err;
2132  err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2133  if (err)
2134  goto out_add;
2135 
2136  err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2137  if (err)
2138  goto out_move;
2139 
2140  err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2141  if (err)
2142  goto out_put;
2143 
2144  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2145  if (err)
2146  goto out_put;
2147 
2148  atomic_inc(&mtt->ref_count);
2149  eq->mtt = mtt;
2150  put_res(dev, slave, mtt->com.res_id, RES_MTT);
2151  res_end_move(dev, slave, RES_EQ, res_id);
2152  return 0;
2153 
2154 out_put:
2155  put_res(dev, slave, mtt->com.res_id, RES_MTT);
2156 out_move:
2157  res_abort_move(dev, slave, RES_EQ, res_id);
2158 out_add:
2159  rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2160  return err;
2161 }
2162 
2163 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2164  int len, struct res_mtt **res)
2165 {
2166  struct mlx4_priv *priv = mlx4_priv(dev);
2167  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2168  struct res_mtt *mtt;
2169  int err = -EINVAL;
2170 
2171  spin_lock_irq(mlx4_tlock(dev));
2172  list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2173  com.list) {
2174  if (!check_mtt_range(dev, slave, start, len, mtt)) {
2175  *res = mtt;
2176  mtt->com.from_state = mtt->com.state;
2177  mtt->com.state = RES_MTT_BUSY;
2178  err = 0;
2179  break;
2180  }
2181  }
2182  spin_unlock_irq(mlx4_tlock(dev));
2183 
2184  return err;
2185 }
2186 
2187 static int verify_qp_parameters(struct mlx4_dev *dev,
2188  struct mlx4_cmd_mailbox *inbox,
2189  enum qp_transition transition, u8 slave)
2190 {
2191  u32 qp_type;
2192  struct mlx4_qp_context *qp_ctx;
2193  enum mlx4_qp_optpar optpar;
2194 
2195  qp_ctx = inbox->buf + 8;
2196  qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2197  optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2198 
2199  switch (qp_type) {
2200  case MLX4_QP_ST_RC:
2201  case MLX4_QP_ST_UC:
2202  switch (transition) {
2203  case QP_TRANS_INIT2RTR:
2204  case QP_TRANS_RTR2RTS:
2205  case QP_TRANS_RTS2RTS:
2206  case QP_TRANS_SQD2SQD:
2207  case QP_TRANS_SQD2RTS:
2208  if (slave != mlx4_master_func_num(dev))
2209  /* slaves have only gid index 0 */
2210  if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2211  if (qp_ctx->pri_path.mgid_index)
2212  return -EINVAL;
2213  if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2214  if (qp_ctx->alt_path.mgid_index)
2215  return -EINVAL;
2216  break;
2217  default:
2218  break;
2219  }
2220 
2221  break;
2222  default:
2223  break;
2224  }
2225 
2226  return 0;
2227 }
2228 
2229 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2230  struct mlx4_vhcr *vhcr,
2231  struct mlx4_cmd_mailbox *inbox,
2232  struct mlx4_cmd_mailbox *outbox,
2233  struct mlx4_cmd_info *cmd)
2234 {
2235  struct mlx4_mtt mtt;
2236  __be64 *page_list = inbox->buf;
2237  u64 *pg_list = (u64 *)page_list;
2238  int i;
2239  struct res_mtt *rmtt = NULL;
2240  int start = be64_to_cpu(page_list[0]);
2241  int npages = vhcr->in_modifier;
2242  int err;
2243 
2244  err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2245  if (err)
2246  return err;
2247 
2248  /* Call the SW implementation of write_mtt:
2249  * - Prepare a dummy mtt struct
2250  * - Translate inbox contents to simple addresses in host endianess */
2251  mtt.offset = 0; /* TBD this is broken but I don't handle it since
2252  we don't really use it */
2253  mtt.order = 0;
2254  mtt.page_shift = 0;
2255  for (i = 0; i < npages; ++i)
2256  pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2257 
2258  err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2259  ((u64 *)page_list + 2));
2260 
2261  if (rmtt)
2262  put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2263 
2264  return err;
2265 }
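/*
 * WRITE_MTT mailbox layout as used above: page_list[0] carries the
 * starting MTT index, the physical addresses begin at page_list[2]
 * (the word in between appears unused here), and the low bit of each
 * address - the MTT present bit - is masked off before the list is
 * handed to __mlx4_write_mtt().
 */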
2266 
2267 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2268  struct mlx4_vhcr *vhcr,
2269  struct mlx4_cmd_mailbox *inbox,
2270  struct mlx4_cmd_mailbox *outbox,
2271  struct mlx4_cmd_info *cmd)
2272 {
2273  int eqn = vhcr->in_modifier;
2274  int res_id = eqn | (slave << 8);
2275  struct res_eq *eq;
2276  int err;
2277 
2278  err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2279  if (err)
2280  return err;
2281 
2282  err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2283  if (err)
2284  goto ex_abort;
2285 
2286  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2287  if (err)
2288  goto ex_put;
2289 
2290  atomic_dec(&eq->mtt->ref_count);
2291  put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2292  res_end_move(dev, slave, RES_EQ, res_id);
2293  rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2294 
2295  return 0;
2296 
2297 ex_put:
2298  put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2299 ex_abort:
2300  res_abort_move(dev, slave, RES_EQ, res_id);
2301 
2302  return err;
2303 }
2304 
2305 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2306 {
2307  struct mlx4_priv *priv = mlx4_priv(dev);
2308  struct mlx4_slave_event_eq_info *event_eq;
2309  struct mlx4_cmd_mailbox *mailbox;
2310  u32 in_modifier = 0;
2311  int err;
2312  int res_id;
2313  struct res_eq *req;
2314 
2315  if (!priv->mfunc.master.slave_state)
2316  return -EINVAL;
2317 
2318  event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2319 
2320  /* Create the event only if the slave is registered */
2321  if (event_eq->eqn < 0)
2322  return 0;
2323 
2324  mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2325  res_id = (slave << 8) | event_eq->eqn;
2326  err = get_res(dev, slave, res_id, RES_EQ, &req);
2327  if (err)
2328  goto unlock;
2329 
2330  if (req->com.from_state != RES_EQ_HW) {
2331  err = -EINVAL;
2332  goto put;
2333  }
2334 
2335  mailbox = mlx4_alloc_cmd_mailbox(dev);
2336  if (IS_ERR(mailbox)) {
2337  err = PTR_ERR(mailbox);
2338  goto put;
2339  }
2340 
2341  if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2342  ++event_eq->token;
2343  eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2344  }
2345 
2346  memcpy(mailbox->buf, (u8 *) eqe, 28);
2347 
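/* Annotation (not in the original file): the in_modifier built below packs
 * the target slave in bits 0-7 and the slave's EQ number in bits 16-23,
 * while the 28-byte memcpy above carries the EQE payload to the firmware
 * command issued next. */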
2348  in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2349 
2350  err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2351  MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2352  MLX4_CMD_NATIVE);
2353 
2354  put_res(dev, slave, res_id, RES_EQ);
2355  mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2356  mlx4_free_cmd_mailbox(dev, mailbox);
2357  return err;
2358 
2359 put:
2360  put_res(dev, slave, res_id, RES_EQ);
2361 
2362 unlock:
2363  mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2364  return err;
2365 }
2366 
2367 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2368  struct mlx4_vhcr *vhcr,
2369  struct mlx4_cmd_mailbox *inbox,
2370  struct mlx4_cmd_mailbox *outbox,
2371  struct mlx4_cmd_info *cmd)
2372 {
2373  int eqn = vhcr->in_modifier;
2374  int res_id = eqn | (slave << 8);
2375  struct res_eq *eq;
2376  int err;
2377 
2378  err = get_res(dev, slave, res_id, RES_EQ, &eq);
2379  if (err)
2380  return err;
2381 
2382  if (eq->com.from_state != RES_EQ_HW) {
2383  err = -EINVAL;
2384  goto ex_put;
2385  }
2386 
2387  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2388 
2389 ex_put:
2390  put_res(dev, slave, res_id, RES_EQ);
2391  return err;
2392 }
2393 
2394 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2395  struct mlx4_vhcr *vhcr,
2396  struct mlx4_cmd_mailbox *inbox,
2397  struct mlx4_cmd_mailbox *outbox,
2398  struct mlx4_cmd_info *cmd)
2399 {
2400  int err;
2401  int cqn = vhcr->in_modifier;
2402  struct mlx4_cq_context *cqc = inbox->buf;
2403  int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2404  struct res_cq *cq;
2405  struct res_mtt *mtt;
2406 
2407  err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2408  if (err)
2409  return err;
2410  err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2411  if (err)
2412  goto out_move;
2413  err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2414  if (err)
2415  goto out_put;
2416  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2417  if (err)
2418  goto out_put;
2419  atomic_inc(&mtt->ref_count);
2420  cq->mtt = mtt;
2421  put_res(dev, slave, mtt->com.res_id, RES_MTT);
2422  res_end_move(dev, slave, RES_CQ, cqn);
2423  return 0;
2424 
2425 out_put:
2426  put_res(dev, slave, mtt->com.res_id, RES_MTT);
2427 out_move:
2428  res_abort_move(dev, slave, RES_CQ, cqn);
2429  return err;
2430 }
2431 
2432 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2433  struct mlx4_vhcr *vhcr,
2434  struct mlx4_cmd_mailbox *inbox,
2435  struct mlx4_cmd_mailbox *outbox,
2436  struct mlx4_cmd_info *cmd)
2437 {
2438  int err;
2439  int cqn = vhcr->in_modifier;
2440  struct res_cq *cq;
2441 
2442  err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2443  if (err)
2444  return err;
2445  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2446  if (err)
2447  goto out_move;
2448  atomic_dec(&cq->mtt->ref_count);
2449  res_end_move(dev, slave, RES_CQ, cqn);
2450  return 0;
2451 
2452 out_move:
2453  res_abort_move(dev, slave, RES_CQ, cqn);
2454  return err;
2455 }
2456 
2457 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2458  struct mlx4_vhcr *vhcr,
2459  struct mlx4_cmd_mailbox *inbox,
2460  struct mlx4_cmd_mailbox *outbox,
2461  struct mlx4_cmd_info *cmd)
2462 {
2463  int cqn = vhcr->in_modifier;
2464  struct res_cq *cq;
2465  int err;
2466 
2467  err = get_res(dev, slave, cqn, RES_CQ, &cq);
2468  if (err)
2469  return err;
2470 
2471  if (cq->com.from_state != RES_CQ_HW)
2472  goto ex_put;
2473 
2474  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2475 ex_put:
2476  put_res(dev, slave, cqn, RES_CQ);
2477 
2478  return err;
2479 }
2480 
2481 static int handle_resize(struct mlx4_dev *dev, int slave,
2482  struct mlx4_vhcr *vhcr,
2483  struct mlx4_cmd_mailbox *inbox,
2484  struct mlx4_cmd_mailbox *outbox,
2485  struct mlx4_cmd_info *cmd,
2486  struct res_cq *cq)
2487 {
2488  int err;
2489  struct res_mtt *orig_mtt;
2490  struct res_mtt *mtt;
2491  struct mlx4_cq_context *cqc = inbox->buf;
2492  int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2493 
2494  err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2495  if (err)
2496  return err;
2497 
2498  if (orig_mtt != cq->mtt) {
2499  err = -EINVAL;
2500  goto ex_put;
2501  }
2502 
2503  err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2504  if (err)
2505  goto ex_put;
2506 
2507  err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2508  if (err)
2509  goto ex_put1;
2510  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2511  if (err)
2512  goto ex_put1;
2513  atomic_dec(&orig_mtt->ref_count);
2514  put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2515  atomic_inc(&mtt->ref_count);
2516  cq->mtt = mtt;
2517  put_res(dev, slave, mtt->com.res_id, RES_MTT);
2518  return 0;
2519 
2520 ex_put1:
2521  put_res(dev, slave, mtt->com.res_id, RES_MTT);
2522 ex_put:
2523  put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2524 
2525  return err;
2526 
2527 }
2528 
2529 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2530  struct mlx4_vhcr *vhcr,
2531  struct mlx4_cmd_mailbox *inbox,
2532  struct mlx4_cmd_mailbox *outbox,
2533  struct mlx4_cmd_info *cmd)
2534 {
2535  int cqn = vhcr->in_modifier;
2536  struct res_cq *cq;
2537  int err;
2538 
2539  err = get_res(dev, slave, cqn, RES_CQ, &cq);
2540  if (err)
2541  return err;
2542 
2543  if (cq->com.from_state != RES_CQ_HW)
2544  goto ex_put;
2545 
2546  if (vhcr->op_modifier == 0) {
2547  err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2548  goto ex_put;
2549  }
2550 
2551  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2552 ex_put:
2553  put_res(dev, slave, cqn, RES_CQ);
2554 
2555  return err;
2556 }
2557 
2558 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2559 {
2560  int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2561  int log_rq_stride = srqc->logstride & 7;
2562  int page_shift = (srqc->log_page_size & 0x3f) + 12;
2563 
2564  if (log_srq_size + log_rq_stride + 4 < page_shift)
2565  return 1;
2566 
2567  return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2568 }
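/* Worked example (not in the original file) of the sizing above: with
 * log_srq_size = 10 (1024 WQEs), log_rq_stride = 2 (stride 1 << (2 + 4) =
 * 64 bytes) and log_page_size giving page_shift = 12 (4 KB pages), the SRQ
 * buffer is 1 << (10 + 2 + 4) = 64 KB and needs 1 << (10 + 2 + 4 - 12) = 16
 * MTT entries; a buffer smaller than one page still costs one entry. */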
2569 
2570 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2571  struct mlx4_vhcr *vhcr,
2572  struct mlx4_cmd_mailbox *inbox,
2573  struct mlx4_cmd_mailbox *outbox,
2574  struct mlx4_cmd_info *cmd)
2575 {
2576  int err;
2577  int srqn = vhcr->in_modifier;
2578  struct res_mtt *mtt;
2579  struct res_srq *srq;
2580  struct mlx4_srq_context *srqc = inbox->buf;
2581  int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2582 
2583  if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2584  return -EINVAL;
2585 
2586  err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2587  if (err)
2588  return err;
2589  err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2590  if (err)
2591  goto ex_abort;
2592  err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2593  mtt);
2594  if (err)
2595  goto ex_put_mtt;
2596 
2597  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2598  if (err)
2599  goto ex_put_mtt;
2600 
2601  atomic_inc(&mtt->ref_count);
2602  srq->mtt = mtt;
2603  put_res(dev, slave, mtt->com.res_id, RES_MTT);
2604  res_end_move(dev, slave, RES_SRQ, srqn);
2605  return 0;
2606 
2607 ex_put_mtt:
2608  put_res(dev, slave, mtt->com.res_id, RES_MTT);
2609 ex_abort:
2610  res_abort_move(dev, slave, RES_SRQ, srqn);
2611 
2612  return err;
2613 }
2614 
2615 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2616  struct mlx4_vhcr *vhcr,
2617  struct mlx4_cmd_mailbox *inbox,
2618  struct mlx4_cmd_mailbox *outbox,
2619  struct mlx4_cmd_info *cmd)
2620 {
2621  int err;
2622  int srqn = vhcr->in_modifier;
2623  struct res_srq *srq;
2624 
2625  err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2626  if (err)
2627  return err;
2628  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2629  if (err)
2630  goto ex_abort;
2631  atomic_dec(&srq->mtt->ref_count);
2632  if (srq->cq)
2633  atomic_dec(&srq->cq->ref_count);
2634  res_end_move(dev, slave, RES_SRQ, srqn);
2635 
2636  return 0;
2637 
2638 ex_abort:
2639  res_abort_move(dev, slave, RES_SRQ, srqn);
2640 
2641  return err;
2642 }
2643 
2644 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2645  struct mlx4_vhcr *vhcr,
2646  struct mlx4_cmd_mailbox *inbox,
2647  struct mlx4_cmd_mailbox *outbox,
2648  struct mlx4_cmd_info *cmd)
2649 {
2650  int err;
2651  int srqn = vhcr->in_modifier;
2652  struct res_srq *srq;
2653 
2654  err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2655  if (err)
2656  return err;
2657  if (srq->com.from_state != RES_SRQ_HW) {
2658  err = -EBUSY;
2659  goto out;
2660  }
2661  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2662 out:
2663  put_res(dev, slave, srqn, RES_SRQ);
2664  return err;
2665 }
2666 
2667 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2668  struct mlx4_vhcr *vhcr,
2669  struct mlx4_cmd_mailbox *inbox,
2670  struct mlx4_cmd_mailbox *outbox,
2671  struct mlx4_cmd_info *cmd)
2672 {
2673  int err;
2674  int srqn = vhcr->in_modifier;
2675  struct res_srq *srq;
2676 
2677  err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2678  if (err)
2679  return err;
2680 
2681  if (srq->com.from_state != RES_SRQ_HW) {
2682  err = -EBUSY;
2683  goto out;
2684  }
2685 
2686  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2687 out:
2688  put_res(dev, slave, srqn, RES_SRQ);
2689  return err;
2690 }
2691 
2692 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2693  struct mlx4_vhcr *vhcr,
2694  struct mlx4_cmd_mailbox *inbox,
2695  struct mlx4_cmd_mailbox *outbox,
2696  struct mlx4_cmd_info *cmd)
2697 {
2698  int err;
2699  int qpn = vhcr->in_modifier & 0x7fffff;
2700  struct res_qp *qp;
2701 
2702  err = get_res(dev, slave, qpn, RES_QP, &qp);
2703  if (err)
2704  return err;
2705  if (qp->com.from_state != RES_QP_HW) {
2706  err = -EBUSY;
2707  goto out;
2708  }
2709 
2710  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2711 out:
2712  put_res(dev, slave, qpn, RES_QP);
2713  return err;
2714 }
2715 
2716 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2717  struct mlx4_vhcr *vhcr,
2718  struct mlx4_cmd_mailbox *inbox,
2719  struct mlx4_cmd_mailbox *outbox,
2720  struct mlx4_cmd_info *cmd)
2721 {
2722  struct mlx4_qp_context *context = inbox->buf + 8;
2723  adjust_proxy_tun_qkey(dev, vhcr, context);
2724  update_pkey_index(dev, slave, inbox);
2725  return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2726 }
2727 
2728 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2729  struct mlx4_vhcr *vhcr,
2730  struct mlx4_cmd_mailbox *inbox,
2731  struct mlx4_cmd_mailbox *outbox,
2732  struct mlx4_cmd_info *cmd)
2733 {
2734  int err;
2735  struct mlx4_qp_context *qpc = inbox->buf + 8;
2736 
2737  err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2738  if (err)
2739  return err;
2740 
2741  update_pkey_index(dev, slave, inbox);
2742  update_gid(dev, inbox, (u8)slave);
2743  adjust_proxy_tun_qkey(dev, vhcr, qpc);
2744 
2745  return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2746 }
2747 
2748 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2749  struct mlx4_vhcr *vhcr,
2750  struct mlx4_cmd_mailbox *inbox,
2751  struct mlx4_cmd_mailbox *outbox,
2752  struct mlx4_cmd_info *cmd)
2753 {
2754  int err;
2755  struct mlx4_qp_context *context = inbox->buf + 8;
2756 
2757  err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2758  if (err)
2759  return err;
2760 
2761  update_pkey_index(dev, slave, inbox);
2762  update_gid(dev, inbox, (u8)slave);
2763  adjust_proxy_tun_qkey(dev, vhcr, context);
2764  return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2765 }
2766 
2767 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2768  struct mlx4_vhcr *vhcr,
2769  struct mlx4_cmd_mailbox *inbox,
2770  struct mlx4_cmd_mailbox *outbox,
2771  struct mlx4_cmd_info *cmd)
2772 {
2773  int err;
2774  struct mlx4_qp_context *context = inbox->buf + 8;
2775 
2776  err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2777  if (err)
2778  return err;
2779 
2780  update_pkey_index(dev, slave, inbox);
2781  update_gid(dev, inbox, (u8)slave);
2782  adjust_proxy_tun_qkey(dev, vhcr, context);
2783  return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2784 }
2785 
2786 
2787 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2788  struct mlx4_vhcr *vhcr,
2789  struct mlx4_cmd_mailbox *inbox,
2790  struct mlx4_cmd_mailbox *outbox,
2791  struct mlx4_cmd_info *cmd)
2792 {
2793  struct mlx4_qp_context *context = inbox->buf + 8;
2794  adjust_proxy_tun_qkey(dev, vhcr, context);
2795  return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2796 }
2797 
2798 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2799  struct mlx4_vhcr *vhcr,
2800  struct mlx4_cmd_mailbox *inbox,
2801  struct mlx4_cmd_mailbox *outbox,
2802  struct mlx4_cmd_info *cmd)
2803 {
2804  int err;
2805  struct mlx4_qp_context *context = inbox->buf + 8;
2806 
2807  err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2808  if (err)
2809  return err;
2810 
2811  adjust_proxy_tun_qkey(dev, vhcr, context);
2812  update_gid(dev, inbox, (u8)slave);
2813  update_pkey_index(dev, slave, inbox);
2814  return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2815 }
2816 
2817 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2818  struct mlx4_vhcr *vhcr,
2819  struct mlx4_cmd_mailbox *inbox,
2820  struct mlx4_cmd_mailbox *outbox,
2821  struct mlx4_cmd_info *cmd)
2822 {
2823  int err;
2824  struct mlx4_qp_context *context = inbox->buf + 8;
2825 
2826  err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2827  if (err)
2828  return err;
2829 
2830  adjust_proxy_tun_qkey(dev, vhcr, context);
2831  update_gid(dev, inbox, (u8)slave);
2832  update_pkey_index(dev, slave, inbox);
2833  return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2834 }
2835 
2836 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2837  struct mlx4_vhcr *vhcr,
2838  struct mlx4_cmd_mailbox *inbox,
2839  struct mlx4_cmd_mailbox *outbox,
2840  struct mlx4_cmd_info *cmd)
2841 {
2842  int err;
2843  int qpn = vhcr->in_modifier & 0x7fffff;
2844  struct res_qp *qp;
2845 
2846  err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2847  if (err)
2848  return err;
2849  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2850  if (err)
2851  goto ex_abort;
2852 
2853  atomic_dec(&qp->mtt->ref_count);
2854  atomic_dec(&qp->rcq->ref_count);
2855  atomic_dec(&qp->scq->ref_count);
2856  if (qp->srq)
2857  atomic_dec(&qp->srq->ref_count);
2858  res_end_move(dev, slave, RES_QP, qpn);
2859  return 0;
2860 
2861 ex_abort:
2862  res_abort_move(dev, slave, RES_QP, qpn);
2863 
2864  return err;
2865 }
2866 
2867 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2868  struct res_qp *rqp, u8 *gid)
2869 {
2870  struct res_gid *res;
2871 
2872  list_for_each_entry(res, &rqp->mcg_list, list) {
2873  if (!memcmp(res->gid, gid, 16))
2874  return res;
2875  }
2876  return NULL;
2877 }
2878 
2879 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2880  u8 *gid, enum mlx4_protocol prot,
2881  enum mlx4_steer_type steer)
2882 {
2883  struct res_gid *res;
2884  int err;
2885 
2886  res = kzalloc(sizeof *res, GFP_KERNEL);
2887  if (!res)
2888  return -ENOMEM;
2889 
2890  spin_lock_irq(&rqp->mcg_spl);
2891  if (find_gid(dev, slave, rqp, gid)) {
2892  kfree(res);
2893  err = -EEXIST;
2894  } else {
2895  memcpy(res->gid, gid, 16);
2896  res->prot = prot;
2897  res->steer = steer;
2898  list_add_tail(&res->list, &rqp->mcg_list);
2899  err = 0;
2900  }
2901  spin_unlock_irq(&rqp->mcg_spl);
2902 
2903  return err;
2904 }
2905 
2906 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2907  u8 *gid, enum mlx4_protocol prot,
2908  enum mlx4_steer_type steer)
2909 {
2910  struct res_gid *res;
2911  int err;
2912 
2913  spin_lock_irq(&rqp->mcg_spl);
2914  res = find_gid(dev, slave, rqp, gid);
2915  if (!res || res->prot != prot || res->steer != steer)
2916  err = -EINVAL;
2917  else {
2918  list_del(&res->list);
2919  kfree(res);
2920  err = 0;
2921  }
2922  spin_unlock_irq(&rqp->mcg_spl);
2923 
2924  return err;
2925 }
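/* Annotation (not in the original file): add_mcg_res()/rem_mcg_res() shadow
 * every multicast attachment a slave makes on one of its QPs, so that
 * detach_qp() further below can replay mlx4_qp_detach_common() for any
 * groups still attached when the slave's resources are cleaned up. */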
2926 
2927 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2928  struct mlx4_vhcr *vhcr,
2929  struct mlx4_cmd_mailbox *inbox,
2930  struct mlx4_cmd_mailbox *outbox,
2931  struct mlx4_cmd_info *cmd)
2932 {
2933  struct mlx4_qp qp; /* dummy for calling attach/detach */
2934  u8 *gid = inbox->buf;
2935  enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2936  int err;
2937  int qpn;
2938  struct res_qp *rqp;
2939  int attach = vhcr->op_modifier;
2940  int block_loopback = vhcr->in_modifier >> 31;
2941  u8 steer_type_mask = 2;
2942  enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2943 
2944  qpn = vhcr->in_modifier & 0xffffff;
2945  err = get_res(dev, slave, qpn, RES_QP, &rqp);
2946  if (err)
2947  return err;
2948 
2949  qp.qpn = qpn;
2950  if (attach) {
2951  err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2952  if (err)
2953  goto ex_put;
2954 
2955  err = mlx4_qp_attach_common(dev, &qp, gid,
2956  block_loopback, prot, type);
2957  if (err)
2958  goto ex_rem;
2959  } else {
2960  err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2961  if (err)
2962  goto ex_put;
2963  err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2964  }
2965 
2966  put_res(dev, slave, qpn, RES_QP);
2967  return 0;
2968 
2969 ex_rem:
2970  /* ignore error return below, already in error */
2971  (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
2972 ex_put:
2973  put_res(dev, slave, qpn, RES_QP);
2974 
2975  return err;
2976 }
2977 
2978 /*
2979  * MAC validation for Flow Steering rules.
2980  * VF can attach rules only with a mac address which is assigned to it.
2981  */
2982 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
2983  struct list_head *rlist)
2984 {
2985  struct mac_res *res, *tmp;
2986  __be64 be_mac;
2987 
2988  /* make sure it isn't multicast or broadcast mac*/
2989  if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
2990  !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
2991  list_for_each_entry_safe(res, tmp, rlist, list) {
2992  be_mac = cpu_to_be64(res->mac << 16);
2993  if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
2994  return 0;
2995  }
2996  pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
2997  eth_header->eth.dst_mac, slave);
2998  return -EINVAL;
2999  }
3000  return 0;
3001 }
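/* Illustrative sketch (not part of the mlx4 sources): res->mac keeps the
 * 48-bit address in the low bytes of a u64, so shifting left by 16 and
 * converting to big endian places the six MAC bytes first, which is what
 * lets the memcmp() above compare it against the rule's 6-byte dst_mac.
 * The helper name is a made-up example. */
static inline __be64 example_mac_to_be64(u64 mac)
{
	/* e.g. 0x0002c9abcdef -> bytes 00 02 c9 ab cd ef 00 00 in memory */
	return cpu_to_be64(mac << 16);
}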
3002 
3003 /*
3004  * In case of missing eth header, append eth header with a MAC address
3005  * assigned to the VF.
3006  */
3007 static int add_eth_header(struct mlx4_dev *dev, int slave,
3008  struct mlx4_cmd_mailbox *inbox,
3009  struct list_head *rlist, int header_id)
3010 {
3011  struct mac_res *res, *tmp;
3012  u8 port;
3013  struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3014  struct mlx4_net_trans_rule_hw_eth *eth_header;
3015  struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3016  struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3017  __be64 be_mac = 0;
3018  __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3019 
3020  ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3021  port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
3022  eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3023 
3024  /* Clear a space in the inbox for eth header */
3025  switch (header_id) {
3026  case MLX4_NET_TRANS_RULE_ID_IPV4:
3027  ip_header =
3028  (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3029  memmove(ip_header, eth_header,
3030  sizeof(*ip_header) + sizeof(*l4_header));
3031  break;
3032  case MLX4_NET_TRANS_RULE_ID_TCP:
3033  case MLX4_NET_TRANS_RULE_ID_UDP:
3034  l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3035  (eth_header + 1);
3036  memmove(l4_header, eth_header, sizeof(*l4_header));
3037  break;
3038  default:
3039  return -EINVAL;
3040  }
3041  list_for_each_entry_safe(res, tmp, rlist, list) {
3042  if (port == res->port) {
3043  be_mac = cpu_to_be64(res->mac << 16);
3044  break;
3045  }
3046  }
3047  if (!be_mac) {
3048  pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
3049  port);
3050  return -EINVAL;
3051  }
3052 
3053  memset(eth_header, 0, sizeof(*eth_header));
3054  eth_header->size = sizeof(*eth_header) >> 2;
3056  memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3057  memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3058 
3059  return 0;
3060 
3061 }
3062 
3063 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3064  struct mlx4_vhcr *vhcr,
3065  struct mlx4_cmd_mailbox *inbox,
3066  struct mlx4_cmd_mailbox *outbox,
3067  struct mlx4_cmd_info *cmd)
3068 {
3069 
3070  struct mlx4_priv *priv = mlx4_priv(dev);
3071  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3072  struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3073  int err;
3074  struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3075  struct _rule_hw *rule_header;
3076  int header_id;
3077 
3078  if (dev->caps.steering_mode !=
3079  MLX4_STEERING_MODE_DEVICE_MANAGED)
3080  return -EOPNOTSUPP;
3081 
3082  ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3083  rule_header = (struct _rule_hw *)(ctrl + 1);
3084  header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3085 
3086  switch (header_id) {
3087  case MLX4_NET_TRANS_RULE_ID_ETH:
3088  if (validate_eth_header_mac(slave, rule_header, rlist))
3089  return -EINVAL;
3090  break;
3091  case MLX4_NET_TRANS_RULE_ID_IB:
3092  break;
3093  case MLX4_NET_TRANS_RULE_ID_IPV4:
3094  case MLX4_NET_TRANS_RULE_ID_TCP:
3095  case MLX4_NET_TRANS_RULE_ID_UDP:
3096  pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3097  if (add_eth_header(dev, slave, inbox, rlist, header_id))
3098  return -EINVAL;
3099  vhcr->in_modifier +=
3100  sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3101  break;
3102  default:
3103  pr_err("Corrupted mailbox.\n");
3104  return -EINVAL;
3105  }
3106 
3107  err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3108  vhcr->in_modifier, 0,
3109  MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3110  MLX4_CMD_NATIVE);
3111  if (err)
3112  return err;
3113 
3114  err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
3115  if (err) {
3116  mlx4_err(dev, "Fail to add flow steering resources.\n ");
3117  /* detach rule*/
3118  mlx4_cmd(dev, vhcr->out_param, 0, 0,
3119  MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3120  MLX4_CMD_NATIVE);
3121  }
3122  return err;
3123 }
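/* Annotation (not in the original file): the in_modifier forwarded to the
 * attach command appears to be the rule size in 32-bit words, which is why
 * the wrapper grows it by sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2
 * after add_eth_header() prepends an L2 header to the rule. */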
3124 
3125 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3126  struct mlx4_vhcr *vhcr,
3127  struct mlx4_cmd_mailbox *inbox,
3128  struct mlx4_cmd_mailbox *outbox,
3129  struct mlx4_cmd_info *cmd)
3130 {
3131  int err;
3132 
3133  if (dev->caps.steering_mode !=
3134  MLX4_STEERING_MODE_DEVICE_MANAGED)
3135  return -EOPNOTSUPP;
3136 
3137  err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3138  if (err) {
3139  mlx4_err(dev, "Fail to remove flow steering resources.\n ");
3140  return err;
3141  }
3142 
3143  err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3144  MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3145  MLX4_CMD_NATIVE);
3146  return err;
3147 }
3148 
3149 enum {
3150  BUSY_MAX_RETRIES = 10
3151 };
3152 
3153 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3154  struct mlx4_vhcr *vhcr,
3155  struct mlx4_cmd_mailbox *inbox,
3156  struct mlx4_cmd_mailbox *outbox,
3157  struct mlx4_cmd_info *cmd)
3158 {
3159  int err;
3160  int index = vhcr->in_modifier & 0xffff;
3161 
3162  err = get_res(dev, slave, index, RES_COUNTER, NULL);
3163  if (err)
3164  return err;
3165 
3166  err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3167  put_res(dev, slave, index, RES_COUNTER);
3168  return err;
3169 }
3170 
3171 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3172 {
3173  struct res_gid *rgid;
3174  struct res_gid *tmp;
3175  struct mlx4_qp qp; /* dummy for calling attach/detach */
3176 
3177  list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3178  qp.qpn = rqp->local_qpn;
3179  (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
3180  rgid->steer);
3181  list_del(&rgid->list);
3182  kfree(rgid);
3183  }
3184 }
3185 
3186 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3187  enum mlx4_resource type, int print)
3188 {
3189  struct mlx4_priv *priv = mlx4_priv(dev);
3190  struct mlx4_resource_tracker *tracker =
3191  &priv->mfunc.master.res_tracker;
3192  struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3193  struct res_common *r;
3194  struct res_common *tmp;
3195  int busy;
3196 
3197  busy = 0;
3198  spin_lock_irq(mlx4_tlock(dev));
3199  list_for_each_entry_safe(r, tmp, rlist, list) {
3200  if (r->owner == slave) {
3201  if (!r->removing) {
3202  if (r->state == RES_ANY_BUSY) {
3203  if (print)
3204  mlx4_dbg(dev,
3205  "%s id 0x%llx is busy\n",
3206  ResourceType(type),
3207  r->res_id);
3208  ++busy;
3209  } else {
3210  r->from_state = r->state;
3211  r->state = RES_ANY_BUSY;
3212  r->removing = 1;
3213  }
3214  }
3215  }
3216  }
3217  spin_unlock_irq(mlx4_tlock(dev));
3218 
3219  return busy;
3220 }
3221 
3222 static int move_all_busy(struct mlx4_dev *dev, int slave,
3223  enum mlx4_resource type)
3224 {
3225  unsigned long begin;
3226  int busy;
3227 
3228  begin = jiffies;
3229  do {
3230  busy = _move_all_busy(dev, slave, type, 0);
3231  if (time_after(jiffies, begin + 5 * HZ))
3232  break;
3233  if (busy)
3234  cond_resched();
3235  } while (busy);
3236 
3237  if (busy)
3238  busy = _move_all_busy(dev, slave, type, 1);
3239 
3240  return busy;
3241 }
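/* Annotation (not in the original file): move_all_busy() retries
 * _move_all_busy() for up to 5 * HZ jiffies (about five seconds), calling
 * cond_resched() between passes; a non-zero return means some resources
 * stayed busy, and the final pass with print == 1 logs which ones. */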
3242 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3243 {
3244  struct mlx4_priv *priv = mlx4_priv(dev);
3245  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3246  struct list_head *qp_list =
3247  &tracker->slave_list[slave].res_list[RES_QP];
3248  struct res_qp *qp;
3249  struct res_qp *tmp;
3250  int state;
3251  u64 in_param;
3252  int qpn;
3253  int err;
3254 
3255  err = move_all_busy(dev, slave, RES_QP);
3256  if (err)
3257  mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3258  "for slave %d\n", slave);
3259 
3260  spin_lock_irq(mlx4_tlock(dev));
3261  list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3262  spin_unlock_irq(mlx4_tlock(dev));
3263  if (qp->com.owner == slave) {
3264  qpn = qp->com.res_id;
3265  detach_qp(dev, slave, qp);
3266  state = qp->com.from_state;
3267  while (state != 0) {
3268  switch (state) {
3269  case RES_QP_RESERVED:
3270  spin_lock_irq(mlx4_tlock(dev));
3271  rb_erase(&qp->com.node,
3272  &tracker->res_tree[RES_QP]);
3273  list_del(&qp->com.list);
3274  spin_unlock_irq(mlx4_tlock(dev));
3275  kfree(qp);
3276  state = 0;
3277  break;
3278  case RES_QP_MAPPED:
3279  if (!valid_reserved(dev, slave, qpn))
3280  __mlx4_qp_free_icm(dev, qpn);
3281  state = RES_QP_RESERVED;
3282  break;
3283  case RES_QP_HW:
3284  in_param = slave;
3285  err = mlx4_cmd(dev, in_param,
3286  qp->local_qpn, 2,
3287  MLX4_CMD_2RST_QP,
3288  MLX4_CMD_TIME_CLASS_A,
3289  MLX4_CMD_NATIVE);
3290  if (err)
3291  mlx4_dbg(dev, "rem_slave_qps: failed"
3292  " to move slave %d qpn %d to"
3293  " reset\n", slave,
3294  qp->local_qpn);
3295  atomic_dec(&qp->rcq->ref_count);
3296  atomic_dec(&qp->scq->ref_count);
3297  atomic_dec(&qp->mtt->ref_count);
3298  if (qp->srq)
3299  atomic_dec(&qp->srq->ref_count);
3300  state = RES_QP_MAPPED;
3301  break;
3302  default:
3303  state = 0;
3304  }
3305  }
3306  }
3307  spin_lock_irq(mlx4_tlock(dev));
3308  }
3309  spin_unlock_irq(mlx4_tlock(dev));
3310 }
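/* Annotation (not in the original file): the rem_slave_*() walkers below all
 * follow the pattern of rem_slave_qps() above: force the slave's resources
 * to busy, then unwind each one state by state (for QPs, RES_QP_HW ->
 * RES_QP_MAPPED -> RES_QP_RESERVED -> freed), issuing the matching firmware
 * command wherever hardware still owns the resource. */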
3311 
3312 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3313 {
3314  struct mlx4_priv *priv = mlx4_priv(dev);
3315  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3316  struct list_head *srq_list =
3317  &tracker->slave_list[slave].res_list[RES_SRQ];
3318  struct res_srq *srq;
3319  struct res_srq *tmp;
3320  int state;
3321  u64 in_param;
3322  LIST_HEAD(tlist);
3323  int srqn;
3324  int err;
3325 
3326  err = move_all_busy(dev, slave, RES_SRQ);
3327  if (err)
3328  mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3329  "busy for slave %d\n", slave);
3330 
3331  spin_lock_irq(mlx4_tlock(dev));
3332  list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3333  spin_unlock_irq(mlx4_tlock(dev));
3334  if (srq->com.owner == slave) {
3335  srqn = srq->com.res_id;
3336  state = srq->com.from_state;
3337  while (state != 0) {
3338  switch (state) {
3339  case RES_SRQ_ALLOCATED:
3340  __mlx4_srq_free_icm(dev, srqn);
3341  spin_lock_irq(mlx4_tlock(dev));
3342  rb_erase(&srq->com.node,
3343  &tracker->res_tree[RES_SRQ]);
3344  list_del(&srq->com.list);
3345  spin_unlock_irq(mlx4_tlock(dev));
3346  kfree(srq);
3347  state = 0;
3348  break;
3349 
3350  case RES_SRQ_HW:
3351  in_param = slave;
3352  err = mlx4_cmd(dev, in_param, srqn, 1,
3353  MLX4_CMD_HW2SW_SRQ,
3354  MLX4_CMD_TIME_CLASS_A,
3355  MLX4_CMD_NATIVE);
3356  if (err)
3357  mlx4_dbg(dev, "rem_slave_srqs: failed"
3358  " to move slave %d srq %d to"
3359  " SW ownership\n",
3360  slave, srqn);
3361 
3362  atomic_dec(&srq->mtt->ref_count);
3363  if (srq->cq)
3364  atomic_dec(&srq->cq->ref_count);
3365  state = RES_SRQ_ALLOCATED;
3366  break;
3367 
3368  default:
3369  state = 0;
3370  }
3371  }
3372  }
3373  spin_lock_irq(mlx4_tlock(dev));
3374  }
3375  spin_unlock_irq(mlx4_tlock(dev));
3376 }
3377 
3378 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3379 {
3380  struct mlx4_priv *priv = mlx4_priv(dev);
3381  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3382  struct list_head *cq_list =
3383  &tracker->slave_list[slave].res_list[RES_CQ];
3384  struct res_cq *cq;
3385  struct res_cq *tmp;
3386  int state;
3387  u64 in_param;
3388  LIST_HEAD(tlist);
3389  int cqn;
3390  int err;
3391 
3392  err = move_all_busy(dev, slave, RES_CQ);
3393  if (err)
3394  mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3395  "busy for slave %d\n", slave);
3396 
3397  spin_lock_irq(mlx4_tlock(dev));
3398  list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3399  spin_unlock_irq(mlx4_tlock(dev));
3400  if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3401  cqn = cq->com.res_id;
3402  state = cq->com.from_state;
3403  while (state != 0) {
3404  switch (state) {
3405  case RES_CQ_ALLOCATED:
3406  __mlx4_cq_free_icm(dev, cqn);
3407  spin_lock_irq(mlx4_tlock(dev));
3408  rb_erase(&cq->com.node,
3409  &tracker->res_tree[RES_CQ]);
3410  list_del(&cq->com.list);
3411  spin_unlock_irq(mlx4_tlock(dev));
3412  kfree(cq);
3413  state = 0;
3414  break;
3415 
3416  case RES_CQ_HW:
3417  in_param = slave;
3418  err = mlx4_cmd(dev, in_param, cqn, 1,
3419  MLX4_CMD_HW2SW_CQ,
3420  MLX4_CMD_TIME_CLASS_A,
3421  MLX4_CMD_NATIVE);
3422  if (err)
3423  mlx4_dbg(dev, "rem_slave_cqs: failed"
3424  " to move slave %d cq %d to"
3425  " SW ownership\n",
3426  slave, cqn);
3427  atomic_dec(&cq->mtt->ref_count);
3428  state = RES_CQ_ALLOCATED;
3429  break;
3430 
3431  default:
3432  state = 0;
3433  }
3434  }
3435  }
3436  spin_lock_irq(mlx4_tlock(dev));
3437  }
3438  spin_unlock_irq(mlx4_tlock(dev));
3439 }
3440 
3441 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3442 {
3443  struct mlx4_priv *priv = mlx4_priv(dev);
3444  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3445  struct list_head *mpt_list =
3446  &tracker->slave_list[slave].res_list[RES_MPT];
3447  struct res_mpt *mpt;
3448  struct res_mpt *tmp;
3449  int state;
3450  u64 in_param;
3451  LIST_HEAD(tlist);
3452  int mptn;
3453  int err;
3454 
3455  err = move_all_busy(dev, slave, RES_MPT);
3456  if (err)
3457  mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3458  "busy for slave %d\n", slave);
3459 
3460  spin_lock_irq(mlx4_tlock(dev));
3461  list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3462  spin_unlock_irq(mlx4_tlock(dev));
3463  if (mpt->com.owner == slave) {
3464  mptn = mpt->com.res_id;
3465  state = mpt->com.from_state;
3466  while (state != 0) {
3467  switch (state) {
3468  case RES_MPT_RESERVED:
3469  __mlx4_mr_release(dev, mpt->key);
3470  spin_lock_irq(mlx4_tlock(dev));
3471  rb_erase(&mpt->com.node,
3472  &tracker->res_tree[RES_MPT]);
3473  list_del(&mpt->com.list);
3474  spin_unlock_irq(mlx4_tlock(dev));
3475  kfree(mpt);
3476  state = 0;
3477  break;
3478 
3479  case RES_MPT_MAPPED:
3480  __mlx4_mr_free_icm(dev, mpt->key);
3481  state = RES_MPT_RESERVED;
3482  break;
3483 
3484  case RES_MPT_HW:
3485  in_param = slave;
3486  err = mlx4_cmd(dev, in_param, mptn, 0,
3487  MLX4_CMD_HW2SW_MPT,
3488  MLX4_CMD_TIME_CLASS_A,
3489  MLX4_CMD_NATIVE);
3490  if (err)
3491  mlx4_dbg(dev, "rem_slave_mrs: failed"
3492  " to move slave %d mpt %d to"
3493  " SW ownership\n",
3494  slave, mptn);
3495  if (mpt->mtt)
3496  atomic_dec(&mpt->mtt->ref_count);
3497  state = RES_MPT_MAPPED;
3498  break;
3499  default:
3500  state = 0;
3501  }
3502  }
3503  }
3504  spin_lock_irq(mlx4_tlock(dev));
3505  }
3506  spin_unlock_irq(mlx4_tlock(dev));
3507 }
3508 
3509 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3510 {
3511  struct mlx4_priv *priv = mlx4_priv(dev);
3512  struct mlx4_resource_tracker *tracker =
3513  &priv->mfunc.master.res_tracker;
3514  struct list_head *mtt_list =
3515  &tracker->slave_list[slave].res_list[RES_MTT];
3516  struct res_mtt *mtt;
3517  struct res_mtt *tmp;
3518  int state;
3519  LIST_HEAD(tlist);
3520  int base;
3521  int err;
3522 
3523  err = move_all_busy(dev, slave, RES_MTT);
3524  if (err)
3525  mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3526  "busy for slave %d\n", slave);
3527 
3528  spin_lock_irq(mlx4_tlock(dev));
3529  list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3530  spin_unlock_irq(mlx4_tlock(dev));
3531  if (mtt->com.owner == slave) {
3532  base = mtt->com.res_id;
3533  state = mtt->com.from_state;
3534  while (state != 0) {
3535  switch (state) {
3536  case RES_MTT_ALLOCATED:
3537  __mlx4_free_mtt_range(dev, base,
3538  mtt->order);
3539  spin_lock_irq(mlx4_tlock(dev));
3540  rb_erase(&mtt->com.node,
3541  &tracker->res_tree[RES_MTT]);
3542  list_del(&mtt->com.list);
3543  spin_unlock_irq(mlx4_tlock(dev));
3544  kfree(mtt);
3545  state = 0;
3546  break;
3547 
3548  default:
3549  state = 0;
3550  }
3551  }
3552  }
3553  spin_lock_irq(mlx4_tlock(dev));
3554  }
3555  spin_unlock_irq(mlx4_tlock(dev));
3556 }
3557 
3558 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3559 {
3560  struct mlx4_priv *priv = mlx4_priv(dev);
3561  struct mlx4_resource_tracker *tracker =
3562  &priv->mfunc.master.res_tracker;
3563  struct list_head *fs_rule_list =
3564  &tracker->slave_list[slave].res_list[RES_FS_RULE];
3565  struct res_fs_rule *fs_rule;
3566  struct res_fs_rule *tmp;
3567  int state;
3568  u64 base;
3569  int err;
3570 
3571  err = move_all_busy(dev, slave, RES_FS_RULE);
3572  if (err)
3573  mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3574  slave);
3575 
3576  spin_lock_irq(mlx4_tlock(dev));
3577  list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3578  spin_unlock_irq(mlx4_tlock(dev));
3579  if (fs_rule->com.owner == slave) {
3580  base = fs_rule->com.res_id;
3581  state = fs_rule->com.from_state;
3582  while (state != 0) {
3583  switch (state) {
3584  case RES_FS_RULE_ALLOCATED:
3585  /* detach rule */
3586  err = mlx4_cmd(dev, base, 0, 0,
3587  MLX4_QP_FLOW_STEERING_DETACH,
3588  MLX4_CMD_TIME_CLASS_A,
3589  MLX4_CMD_NATIVE);
3590 
3591  spin_lock_irq(mlx4_tlock(dev));
3592  rb_erase(&fs_rule->com.node,
3593  &tracker->res_tree[RES_FS_RULE]);
3594  list_del(&fs_rule->com.list);
3595  spin_unlock_irq(mlx4_tlock(dev));
3596  kfree(fs_rule);
3597  state = 0;
3598  break;
3599 
3600  default:
3601  state = 0;
3602  }
3603  }
3604  }
3605  spin_lock_irq(mlx4_tlock(dev));
3606  }
3607  spin_unlock_irq(mlx4_tlock(dev));
3608 }
3609 
3610 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3611 {
3612  struct mlx4_priv *priv = mlx4_priv(dev);
3613  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3614  struct list_head *eq_list =
3615  &tracker->slave_list[slave].res_list[RES_EQ];
3616  struct res_eq *eq;
3617  struct res_eq *tmp;
3618  int err;
3619  int state;
3620  LIST_HEAD(tlist);
3621  int eqn;
3622  struct mlx4_cmd_mailbox *mailbox;
3623 
3624  err = move_all_busy(dev, slave, RES_EQ);
3625  if (err)
3626  mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3627  "busy for slave %d\n", slave);
3628 
3629  spin_lock_irq(mlx4_tlock(dev));
3630  list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3631  spin_unlock_irq(mlx4_tlock(dev));
3632  if (eq->com.owner == slave) {
3633  eqn = eq->com.res_id;
3634  state = eq->com.from_state;
3635  while (state != 0) {
3636  switch (state) {
3637  case RES_EQ_RESERVED:
3638  spin_lock_irq(mlx4_tlock(dev));
3639  rb_erase(&eq->com.node,
3640  &tracker->res_tree[RES_EQ]);
3641  list_del(&eq->com.list);
3642  spin_unlock_irq(mlx4_tlock(dev));
3643  kfree(eq);
3644  state = 0;
3645  break;
3646 
3647  case RES_EQ_HW:
3648  mailbox = mlx4_alloc_cmd_mailbox(dev);
3649  if (IS_ERR(mailbox)) {
3650  cond_resched();
3651  continue;
3652  }
3653  err = mlx4_cmd_box(dev, slave, 0,
3654  eqn & 0xff, 0,
3655  MLX4_CMD_HW2SW_EQ,
3656  MLX4_CMD_TIME_CLASS_A,
3657  MLX4_CMD_NATIVE);
3658  if (err)
3659  mlx4_dbg(dev, "rem_slave_eqs: failed"
3660  " to move slave %d eqs %d to"
3661  " SW ownership\n", slave, eqn);
3662  mlx4_free_cmd_mailbox(dev, mailbox);
3663  atomic_dec(&eq->mtt->ref_count);
3664  state = RES_EQ_RESERVED;
3665  break;
3666 
3667  default:
3668  state = 0;
3669  }
3670  }
3671  }
3672  spin_lock_irq(mlx4_tlock(dev));
3673  }
3674  spin_unlock_irq(mlx4_tlock(dev));
3675 }
3676 
3677 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3678 {
3679  struct mlx4_priv *priv = mlx4_priv(dev);
3680  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3681  struct list_head *counter_list =
3682  &tracker->slave_list[slave].res_list[RES_COUNTER];
3683  struct res_counter *counter;
3684  struct res_counter *tmp;
3685  int err;
3686  int index;
3687 
3688  err = move_all_busy(dev, slave, RES_COUNTER);
3689  if (err)
3690  mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3691  "busy for slave %d\n", slave);
3692 
3693  spin_lock_irq(mlx4_tlock(dev));
3694  list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3695  if (counter->com.owner == slave) {
3696  index = counter->com.res_id;
3697  rb_erase(&counter->com.node,
3698  &tracker->res_tree[RES_COUNTER]);
3699  list_del(&counter->com.list);
3700  kfree(counter);
3701  __mlx4_counter_free(dev, index);
3702  }
3703  }
3704  spin_unlock_irq(mlx4_tlock(dev));
3705 }
3706 
3707 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3708 {
3709  struct mlx4_priv *priv = mlx4_priv(dev);
3710  struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3711  struct list_head *xrcdn_list =
3712  &tracker->slave_list[slave].res_list[RES_XRCD];
3713  struct res_xrcdn *xrcd;
3714  struct res_xrcdn *tmp;
3715  int err;
3716  int xrcdn;
3717 
3718  err = move_all_busy(dev, slave, RES_XRCD);
3719  if (err)
3720  mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3721  "busy for slave %d\n", slave);
3722 
3723  spin_lock_irq(mlx4_tlock(dev));
3724  list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3725  if (xrcd->com.owner == slave) {
3726  xrcdn = xrcd->com.res_id;
3727  rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3728  list_del(&xrcd->com.list);
3729  kfree(xrcd);
3730  __mlx4_xrcd_free(dev, xrcdn);
3731  }
3732  }
3733  spin_unlock_irq(mlx4_tlock(dev));
3734 }
3735 
3736 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3737 {
3738  struct mlx4_priv *priv = mlx4_priv(dev);
3739 
3740  mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3741  /*VLAN*/
3742  rem_slave_macs(dev, slave);
3743  rem_slave_qps(dev, slave);
3744  rem_slave_srqs(dev, slave);
3745  rem_slave_cqs(dev, slave);
3746  rem_slave_mrs(dev, slave);
3747  rem_slave_eqs(dev, slave);
3748  rem_slave_mtts(dev, slave);
3749  rem_slave_counters(dev, slave);
3750  rem_slave_xrcdns(dev, slave);
3751  rem_slave_fs_rule(dev, slave);
3752  mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3753 }