Linux Kernel 3.7.1
bnx2x_sp.c
1 /* bnx2x_sp.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2011-2012 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <[email protected]>
16  * Written by: Vladislav Zolotarov
17  *
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_sp.h"
30 
31 #define BNX2X_MAX_EMUL_MULTI 16
32 
33 #define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
34 
35 /**** Exe Queue interfaces ****/
36 
48 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
49  struct bnx2x_exe_queue_obj *o,
50  int exe_len,
51  union bnx2x_qable_obj *owner,
52  exe_q_validate validate,
53  exe_q_remove remove,
54  exe_q_optimize optimize,
55  exe_q_execute exec,
56  exe_q_get get)
57 {
58  memset(o, 0, sizeof(*o));
59 
60  INIT_LIST_HEAD(&o->exe_queue);
61  INIT_LIST_HEAD(&o->pending_comp);
62 
63  spin_lock_init(&o->lock);
64 
65  o->exe_chunk_len = exe_len;
66  o->owner = owner;
67 
68  /* Owner specific callbacks */
69  o->validate = validate;
70  o->remove = remove;
71  o->optimize = optimize;
72  o->execute = exec;
73  o->get = get;
74 
75  DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
76  exe_len);
77 }
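
For orientation, the five callbacks stored here are supplied by the owner object; bnx2x_init_mac_obj() further down in this file wires them up like this (repeated here purely as a reading aid):

    bnx2x_exe_queue_init(bp,
                         &mac_obj->exe_queue, 1, qable_obj,
                         bnx2x_validate_vlan_mac,
                         bnx2x_remove_vlan_mac,
                         bnx2x_optimize_vlan_mac,
                         bnx2x_execute_vlan_mac,
                         bnx2x_exeq_get_mac);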
78 
79 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
80  struct bnx2x_exeq_elem *elem)
81 {
82  DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
83  kfree(elem);
84 }
85 
86 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
87 {
88  struct bnx2x_exeq_elem *elem;
89  int cnt = 0;
90 
91  spin_lock_bh(&o->lock);
92 
93  list_for_each_entry(elem, &o->exe_queue, link)
94  cnt++;
95 
96  spin_unlock_bh(&o->lock);
97 
98  return cnt;
99 }
100 
111 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
112  struct bnx2x_exe_queue_obj *o,
113  struct bnx2x_exeq_elem *elem,
114  bool restore)
115 {
116  int rc;
117 
118  spin_lock_bh(&o->lock);
119 
120  if (!restore) {
121  /* Try to cancel this element queue */
122  rc = o->optimize(bp, o->owner, elem);
123  if (rc)
124  goto free_and_exit;
125 
126  /* Check if this request is ok */
127  rc = o->validate(bp, o->owner, elem);
128  if (rc) {
129  DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
130  goto free_and_exit;
131  }
132  }
133 
134  /* If so, add it to the execution queue */
135  list_add_tail(&elem->link, &o->exe_queue);
136 
137  spin_unlock_bh(&o->lock);
138 
139  return 0;
140 
141 free_and_exit:
142  bnx2x_exe_queue_free_elem(bp, elem);
143 
144  spin_unlock_bh(&o->lock);
145 
146  return rc;
147 
148 }
149 
150 static inline void __bnx2x_exe_queue_reset_pending(
151  struct bnx2x *bp,
152  struct bnx2x_exe_queue_obj *o)
153 {
154  struct bnx2x_exeq_elem *elem;
155 
156  while (!list_empty(&o->pending_comp)) {
157  elem = list_first_entry(&o->pending_comp,
158  struct bnx2x_exeq_elem, link);
159 
160  list_del(&elem->link);
161  bnx2x_exe_queue_free_elem(bp, elem);
162  }
163 }
164 
165 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
166  struct bnx2x_exe_queue_obj *o)
167 {
168 
169  spin_lock_bh(&o->lock);
170 
171  __bnx2x_exe_queue_reset_pending(bp, o);
172 
173  spin_unlock_bh(&o->lock);
174 
175 }
176 
186 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
187  struct bnx2x_exe_queue_obj *o,
188  unsigned long *ramrod_flags)
189 {
190  struct bnx2x_exeq_elem *elem, spacer;
191  int cur_len = 0, rc;
192 
193  memset(&spacer, 0, sizeof(spacer));
194 
195  spin_lock_bh(&o->lock);
196 
197  /*
198  * Next step should not be performed until the current is finished,
199  * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
200  * properly clear object internals without sending any command to the FW
201  * which also implies there won't be any completion to clear the
202  * 'pending' list.
203  */
204  if (!list_empty(&o->pending_comp)) {
205  if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
206  DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
207  __bnx2x_exe_queue_reset_pending(bp, o);
208  } else {
209  spin_unlock_bh(&o->lock);
210  return 1;
211  }
212  }
213 
214  /*
215  * Run through the pending commands list and create a next
216  * execution chunk.
217  */
218  while (!list_empty(&o->exe_queue)) {
219  elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
220  link);
221  WARN_ON(!elem->cmd_len);
222 
223  if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
224  cur_len += elem->cmd_len;
225  /*
226  * Prevent both lists from being empty when moving an
227  * element. This allows calling
228  * bnx2x_exe_queue_empty() without locking.
229  */
230  list_add_tail(&spacer.link, &o->pending_comp);
231  mb();
232  list_move_tail(&elem->link, &o->pending_comp);
233  list_del(&spacer.link);
234  } else
235  break;
236  }
237 
238  /* Sanity check */
239  if (!cur_len) {
240  spin_unlock_bh(&o->lock);
241  return 0;
242  }
243 
244  rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
245  if (rc < 0)
246  /*
247  * In case of an error return the commands back to the queue
248  * and reset the pending_comp.
249  */
250  list_splice_init(&o->pending_comp, &o->exe_queue);
251  else if (!rc)
252  /*
253  * If zero is returned, means there are no outstanding pending
254  * completions and we may dismiss the pending list.
255  */
256  __bnx2x_exe_queue_reset_pending(bp, o);
257 
258  spin_unlock_bh(&o->lock);
259  return rc;
260 }
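
The spacer dance above (listing lines 230-233) is worth unpacking: list_move_tail() momentarily removes the element from both lists, so without the stack-allocated spacer a lockless bnx2x_exe_queue_empty() could observe exe_queue and pending_comp empty at the same instant. A minimal standalone sketch of the same idea, using a toy list API rather than the kernel's list.h:

    #include <stdio.h>

    struct node { struct node *prev, *next; };

    static void list_init(struct node *h)  { h->prev = h->next = h; }
    static int  list_empty(struct node *h) { return h->next == h; }
    static void list_add_tail(struct node *n, struct node *h)
    {
            n->prev = h->prev; n->next = h;
            h->prev->next = n; h->prev = n;
    }
    static void list_del(struct node *n)
    {
            n->prev->next = n->next; n->next->prev = n->prev;
    }

    int main(void)
    {
            struct node exe_queue, pending_comp, elem, spacer;

            list_init(&exe_queue); list_init(&pending_comp);
            list_add_tail(&elem, &exe_queue);

            /* pending_comp becomes non-empty *before* elem leaves
             * exe_queue, so "both lists empty" is never observable. */
            list_add_tail(&spacer, &pending_comp);
            /* mb() would go here in the concurrent kernel version */
            list_del(&elem); list_add_tail(&elem, &pending_comp);
            list_del(&spacer);

            printf("exe_queue empty=%d pending_comp empty=%d\n",
                   list_empty(&exe_queue), list_empty(&pending_comp));
            return 0;
    }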
261 
262 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
263 {
264  bool empty = list_empty(&o->exe_queue);
265 
266  /* Don't reorder!!! */
267  mb();
268 
269  return empty && list_empty(&o->pending_comp);
270 }
271 
272 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
273  struct bnx2x *bp)
274 {
275  DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
276  return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
277 }
278 
279 /************************ raw_obj functions ***********************************/
280 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
281 {
282  return !!test_bit(o->state, o->pstate);
283 }
284 
285 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
286 {
287  smp_mb__before_clear_bit();
288  clear_bit(o->state, o->pstate);
289  smp_mb__after_clear_bit();
290 }
291 
292 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
293 {
294  smp_mb__before_clear_bit();
295  set_bit(o->state, o->pstate);
296  smp_mb__after_clear_bit();
297 }
298 
307 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
308  unsigned long *pstate)
309 {
310  /* can take a while if any port is running */
311  int cnt = 5000;
312 
313 
314  if (CHIP_REV_IS_EMUL(bp))
315  cnt *= 20;
316 
317  DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
318 
319  might_sleep();
320  while (cnt--) {
321  if (!test_bit(state, pstate)) {
322 #ifdef BNX2X_STOP_ON_ERROR
323  DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
324 #endif
325  return 0;
326  }
327 
328  usleep_range(1000, 1000);
329 
330  if (bp->panic)
331  return -EIO;
332  }
333 
334  /* timeout! */
335  BNX2X_ERR("timeout waiting for state %d\n", state);
336 #ifdef BNX2X_STOP_ON_ERROR
337  bnx2x_panic();
338 #endif
339 
340  return -EBUSY;
341 }
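
So the wait is bounded at roughly 5000 polls of ~1 ms each, i.e. about 5 s on silicon and ~100 s once cnt is scaled by 20 for emulation. A userspace sketch of the same poll-until-bit-clears pattern (names hypothetical; the driver uses test_bit()/usleep_range()):

    #include <unistd.h>

    /* Poll until the given bit clears in *pstate or ~5 s elapse. */
    static int wait_state_clear(const volatile unsigned long *pstate, int bit)
    {
            int cnt = 5000;                 /* ~1 ms per poll => ~5 s cap */

            while (cnt--) {
                    if (!(*pstate & (1UL << bit)))
                            return 0;       /* desired state reached */
                    usleep(1000);
            }
            return -1;                      /* timed out */
    }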
342 
343 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
344 {
345  return bnx2x_state_wait(bp, raw->state, raw->pstate);
346 }
347 
348 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
349 /* credit handling callbacks */
350 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
351 {
352  struct bnx2x_credit_pool_obj *mp = o->macs_pool;
353 
354  WARN_ON(!mp);
355 
356  return mp->get_entry(mp, offset);
357 }
358 
359 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
360 {
361  struct bnx2x_credit_pool_obj *mp = o->macs_pool;
362 
363  WARN_ON(!mp);
364 
365  return mp->get(mp, 1);
366 }
367 
368 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
369 {
370  struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
371 
372  WARN_ON(!vp);
373 
374  return vp->get_entry(vp, offset);
375 }
376 
377 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
378 {
379  struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
380 
381  WARN_ON(!vp);
382 
383  return vp->get(vp, 1);
384 }
385 
386 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
387 {
388  struct bnx2x_credit_pool_obj *mp = o->macs_pool;
389  struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
390 
391  if (!mp->get(mp, 1))
392  return false;
393 
394  if (!vp->get(vp, 1)) {
395  mp->put(mp, 1);
396  return false;
397  }
398 
399  return true;
400 }
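
bnx2x_get_credit_vlan_mac() is the usual two-resource acquire: if the second pool refuses, the first credit is handed back, so failure leaves both pools untouched. The same pattern in standalone toy form (plain counters standing in for the driver's credit-pool objects):

    #include <stdbool.h>
    #include <stdio.h>

    struct pool { int avail; };

    static bool pool_get(struct pool *p) { return p->avail > 0 ? (p->avail--, true) : false; }
    static void pool_put(struct pool *p) { p->avail++; }

    /* Take one credit from each pool, or from neither. */
    static bool get_vlan_mac_credit(struct pool *macs, struct pool *vlans)
    {
            if (!pool_get(macs))
                    return false;
            if (!pool_get(vlans)) {
                    pool_put(macs);         /* roll back the MAC credit */
                    return false;
            }
            return true;
    }

    int main(void)
    {
            struct pool macs = { 1 }, vlans = { 0 };

            printf("got=%d macs left=%d\n",
                   get_vlan_mac_credit(&macs, &vlans), macs.avail);
            /* prints got=0 macs left=1 -- nothing leaked */
            return 0;
    }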
401 
402 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
403 {
404  struct bnx2x_credit_pool_obj *mp = o->macs_pool;
405 
406  return mp->put_entry(mp, offset);
407 }
408 
409 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
410 {
411  struct bnx2x_credit_pool_obj *mp = o->macs_pool;
412 
413  return mp->put(mp, 1);
414 }
415 
416 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
417 {
418  struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
419 
420  return vp->put_entry(vp, offset);
421 }
422 
423 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
424 {
425  struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
426 
427  return vp->put(vp, 1);
428 }
429 
430 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
431 {
432  struct bnx2x_credit_pool_obj *mp = o->macs_pool;
433  struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
434 
435  if (!mp->put(mp, 1))
436  return false;
437 
438  if (!vp->put(vp, 1)) {
439  mp->get(mp, 1);
440  return false;
441  }
442 
443  return true;
444 }
445 
446 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
447  int n, u8 *buf)
448 {
449  struct bnx2x_vlan_mac_registry_elem *pos;
450  u8 *next = buf;
451  int counter = 0;
452 
453  /* traverse list */
454  list_for_each_entry(pos, &o->head, link) {
455  if (counter < n) {
456  /* place leading zeroes in buffer */
457  memset(next, 0, MAC_LEADING_ZERO_CNT);
458 
459  /* place mac after leading zeroes*/
460  memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
461  ETH_ALEN);
462 
463  /* calculate address of next element and
464  * advance counter
465  */
466  counter++;
467  next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
468 
469  DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
470  counter, next, pos->u.mac.mac);
471  }
472  }
473  return counter * ETH_ALEN;
474 }
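
The buffer layout above: each element occupies ALIGN(ETH_ALEN, sizeof(u32)) = 8 bytes, i.e. MAC_LEADING_ZERO_CNT = 2 zero bytes of padding followed by the 6-byte MAC, so element i starts at buf + 8*i (note that the return value nevertheless counts ETH_ALEN = 6 bytes per element). A quick standalone check of the arithmetic:

    #include <stdio.h>

    #define ETH_ALEN 6
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long stride = ALIGN(ETH_ALEN, sizeof(unsigned int));

            printf("stride=%lu pad=%lu\n", stride, stride - ETH_ALEN);
            for (int i = 0; i < 3; i++)
                    printf("element %d at offset %lu\n", i, i * stride);
            return 0;       /* stride=8 pad=2; offsets 0, 8, 16 */
    }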
475 
476 /* check_add() callbacks */
477 static int bnx2x_check_mac_add(struct bnx2x *bp,
478  struct bnx2x_vlan_mac_obj *o,
479  union bnx2x_classification_ramrod_data *data)
480 {
481  struct bnx2x_vlan_mac_registry_elem *pos;
482 
483  DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
484 
485  if (!is_valid_ether_addr(data->mac.mac))
486  return -EINVAL;
487 
488  /* Check if a requested MAC already exists */
489  list_for_each_entry(pos, &o->head, link)
490  if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
491  return -EEXIST;
492 
493  return 0;
494 }
495 
496 static int bnx2x_check_vlan_add(struct bnx2x *bp,
497  struct bnx2x_vlan_mac_obj *o,
498  union bnx2x_classification_ramrod_data *data)
499 {
500  struct bnx2x_vlan_mac_registry_elem *pos;
501 
502  DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
503 
504  list_for_each_entry(pos, &o->head, link)
505  if (data->vlan.vlan == pos->u.vlan.vlan)
506  return -EEXIST;
507 
508  return 0;
509 }
510 
511 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
512  struct bnx2x_vlan_mac_obj *o,
513  union bnx2x_classification_ramrod_data *data)
514 {
515  struct bnx2x_vlan_mac_registry_elem *pos;
516 
517  DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
518  data->vlan_mac.mac, data->vlan_mac.vlan);
519 
520  list_for_each_entry(pos, &o->head, link)
521  if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
522  (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
523  ETH_ALEN)))
524  return -EEXIST;
525 
526  return 0;
527 }
528 
529 
530 /* check_del() callbacks */
531 static struct bnx2x_vlan_mac_registry_elem *
532  bnx2x_check_mac_del(struct bnx2x *bp,
533  struct bnx2x_vlan_mac_obj *o,
534  union bnx2x_classification_ramrod_data *data)
535 {
536  struct bnx2x_vlan_mac_registry_elem *pos;
537 
538  DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
539 
540  list_for_each_entry(pos, &o->head, link)
541  if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
542  return pos;
543 
544  return NULL;
545 }
546 
547 static struct bnx2x_vlan_mac_registry_elem *
548  bnx2x_check_vlan_del(struct bnx2x *bp,
549  struct bnx2x_vlan_mac_obj *o,
550  union bnx2x_classification_ramrod_data *data)
551 {
552  struct bnx2x_vlan_mac_registry_elem *pos;
553 
554  DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
555 
556  list_for_each_entry(pos, &o->head, link)
557  if (data->vlan.vlan == pos->u.vlan.vlan)
558  return pos;
559 
560  return NULL;
561 }
562 
563 static struct bnx2x_vlan_mac_registry_elem *
564  bnx2x_check_vlan_mac_del(struct bnx2x *bp,
565  struct bnx2x_vlan_mac_obj *o,
566  union bnx2x_classification_ramrod_data *data)
567 {
568  struct bnx2x_vlan_mac_registry_elem *pos;
569 
570  DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
571  data->vlan_mac.mac, data->vlan_mac.vlan);
572 
573  list_for_each_entry(pos, &o->head, link)
574  if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
575  (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
576  ETH_ALEN)))
577  return pos;
578 
579  return NULL;
580 }
581 
582 /* check_move() callback */
583 static bool bnx2x_check_move(struct bnx2x *bp,
584  struct bnx2x_vlan_mac_obj *src_o,
585  struct bnx2x_vlan_mac_obj *dst_o,
586  union bnx2x_classification_ramrod_data *data)
587 {
588  struct bnx2x_vlan_mac_registry_elem *pos;
589  int rc;
590 
591  /* Check if we can delete the requested configuration from the first
592  * object.
593  */
594  pos = src_o->check_del(bp, src_o, data);
595 
596  /* check if configuration can be added */
597  rc = dst_o->check_add(bp, dst_o, data);
598 
599  /* If this classification can not be added (is already set)
600  * or can't be deleted - return an error.
601  */
602  if (rc || !pos)
603  return false;
604 
605  return true;
606 }
607 
608 static bool bnx2x_check_move_always_err(
609  struct bnx2x *bp,
610  struct bnx2x_vlan_mac_obj *src_o,
611  struct bnx2x_vlan_mac_obj *dst_o,
612  union bnx2x_classification_ramrod_data *data)
613 {
614  return false;
615 }
616 
617 
618 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
619 {
620  struct bnx2x_raw_obj *raw = &o->raw;
621  u8 rx_tx_flag = 0;
622 
623  if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
624  (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
625  rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
626 
627  if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
628  (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
629  rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
630 
631  return rx_tx_flag;
632 }
633 
634 
635 void bnx2x_set_mac_in_nig(struct bnx2x *bp,
636  bool add, unsigned char *dev_addr, int index)
637 {
638  u32 wb_data[2];
639  u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
640  NIG_REG_LLH0_FUNC_MEM;
641 
642  if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
643  return;
644 
645  if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
646  return;
647 
648  DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
649  (add ? "ADD" : "DELETE"), index);
650 
651  if (add) {
652  /* LLH_FUNC_MEM is a u64 WB register */
653  reg_offset += 8*index;
654 
655  wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
656  (dev_addr[4] << 8) | dev_addr[5]);
657  wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
658 
659  REG_WR_DMAE(bp, reg_offset, wb_data, 2);
660  }
661 
662  REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
663  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
664 }
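
The packing above splits the MAC across the two write-back words, high bytes in wb_data[1]: for 00:11:22:33:44:55 this gives wb_data[0] = 0x22334455 and wb_data[1] = 0x0011. A standalone check:

    #include <stdio.h>

    int main(void)
    {
            unsigned char dev_addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            unsigned int wb_data[2];

            wb_data[0] = (dev_addr[2] << 24) | (dev_addr[3] << 16) |
                         (dev_addr[4] << 8) | dev_addr[5];
            wb_data[1] = (dev_addr[0] << 8) | dev_addr[1];

            printf("wb_data[0]=0x%08x wb_data[1]=0x%04x\n",
                   wb_data[0], wb_data[1]);
            /* prints wb_data[0]=0x22334455 wb_data[1]=0x0011 */
            return 0;
    }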
665 
676 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
677  struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
678  struct eth_classify_cmd_header *hdr)
679 {
680  struct bnx2x_raw_obj *raw = &o->raw;
681 
682  hdr->client_id = raw->cl_id;
683  hdr->func_id = raw->func_id;
684 
685  /* Rx or/and Tx (internal switching) configuration ? */
686  hdr->cmd_general_data |=
687  bnx2x_vlan_mac_get_rx_tx_flag(o);
688 
689  if (add)
690  hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
691 
692  hdr->cmd_general_data |=
693  (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
694 }
695 
707 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
708  struct eth_classify_header *hdr, int rule_cnt)
709 {
710  hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
711  hdr->rule_cnt = (u8)rule_cnt;
712 }
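
The echo word packs the software CID in the low bits and the pending-state type above BNX2X_SWCID_SHIFT; the completion path undoes it with the mirror-image masks. A standalone check of the round trip (the real mask and shift come from bnx2x.h; the 17 here is an assumption):

    #include <stdio.h>

    #define SWCID_SHIFT 17
    #define SWCID_MASK  ((1u << SWCID_SHIFT) - 1)

    int main(void)
    {
            unsigned int cid = 42, type = 3;
            unsigned int echo = (cid & SWCID_MASK) | (type << SWCID_SHIFT);

            printf("cid=%u type=%u\n",
                   echo & SWCID_MASK, echo >> SWCID_SHIFT);
            return 0;       /* prints cid=42 type=3 */
    }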
713 
714 
715 /* hw_config() callbacks */
716 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
717  struct bnx2x_vlan_mac_obj *o,
718  struct bnx2x_exeq_elem *elem, int rule_idx,
719  int cam_offset)
720 {
721  struct bnx2x_raw_obj *raw = &o->raw;
722  struct eth_classify_rules_ramrod_data *data =
723  (struct eth_classify_rules_ramrod_data *)(raw->rdata);
724  int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
725  union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
726  bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
727  unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
728  u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
729 
730  /*
731  * Set LLH CAM entry: currently only iSCSI and ETH macs are
732  * relevant. In addition, current implementation is tuned for a
733  * single ETH MAC.
734  *
735  * When a PF configuration with multiple unicast ETH MACs in
736  * switch-independent mode is required (NetQ, multiple netdev MACs,
737  * etc.), consider better utilisation of the 8 per-function MAC
738  * entries in the LLH register. There are also the
739  * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the
740  * total number of CAM entries to 16.
741  *
742  * Currently we won't configure NIG for MACs other than a primary ETH
743  * MAC and iSCSI L2 MAC.
744  *
745  * If this MAC is moving from one Queue to another, no need to change
746  * NIG configuration.
747  */
748  if (cmd != BNX2X_VLAN_MAC_MOVE) {
749  if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
750  bnx2x_set_mac_in_nig(bp, add, mac,
751  BNX2X_LLH_CAM_ISCSI_ETH_LINE);
752  else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
753  bnx2x_set_mac_in_nig(bp, add, mac,
754  BNX2X_LLH_CAM_ETH_LINE);
755  }
756 
757  /* Reset the ramrod data buffer for the first rule */
758  if (rule_idx == 0)
759  memset(data, 0, sizeof(*data));
760 
761  /* Setup a command header */
762  bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
763  &rule_entry->mac.header);
764 
765  DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
766  (add ? "add" : "delete"), mac, raw->cl_id);
767 
768  /* Set a MAC itself */
769  bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
770  &rule_entry->mac.mac_mid,
771  &rule_entry->mac.mac_lsb, mac);
772 
773  /* MOVE: Add a rule that will add this MAC to the target Queue */
774  if (cmd == BNX2X_VLAN_MAC_MOVE) {
775  rule_entry++;
776  rule_cnt++;
777 
778  /* Setup ramrod data */
779  bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
780  elem->cmd_data.vlan_mac.target_obj,
781  true, CLASSIFY_RULE_OPCODE_MAC,
782  &rule_entry->mac.header);
783 
784  /* Set a MAC itself */
785  bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
786  &rule_entry->mac.mac_mid,
787  &rule_entry->mac.mac_lsb, mac);
788  }
789 
790  /* Set the ramrod data header */
791  /* TODO: take this to the higher level in order to prevent multiple
792  writing */
793  bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
794  rule_cnt);
795 }
796 
808 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
809  struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
810  struct mac_configuration_hdr *hdr)
811 {
812  struct bnx2x_raw_obj *r = &o->raw;
813 
814  hdr->length = 1;
815  hdr->offset = (u8)cam_offset;
816  hdr->client_id = 0xff;
817  hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
818 }
819 
820 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
821  struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
822  u16 vlan_id, struct mac_configuration_entry *cfg_entry)
823 {
824  struct bnx2x_raw_obj *r = &o->raw;
825  u32 cl_bit_vec = (1 << r->cl_id);
826 
827  cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
828  cfg_entry->pf_id = r->func_id;
829  cfg_entry->vlan_id = cpu_to_le16(vlan_id);
830 
831  if (add) {
832  SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
833  T_ETH_MAC_COMMAND_SET);
834  SET_FLAG(cfg_entry->flags,
835  MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
836 
837  /* Set a MAC in a ramrod data */
838  bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
839  &cfg_entry->middle_mac_addr,
840  &cfg_entry->lsb_mac_addr, mac);
841  } else
842  SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
843  T_ETH_MAC_COMMAND_INVALIDATE);
844 }
845 
846 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
847  struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
848  u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
849 {
850  struct mac_configuration_entry *cfg_entry = &config->config_table[0];
851  struct bnx2x_raw_obj *raw = &o->raw;
852 
853  bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
854  &config->hdr);
855  bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
856  cfg_entry);
857 
858  DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
859  (add ? "setting" : "clearing"),
860  mac, raw->cl_id, cam_offset);
861 }
862 
872 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
873  struct bnx2x_vlan_mac_obj *o,
874  struct bnx2x_exeq_elem *elem, int rule_idx,
875  int cam_offset)
876 {
877  struct bnx2x_raw_obj *raw = &o->raw;
878  struct mac_configuration_cmd *config =
879  (struct mac_configuration_cmd *)(raw->rdata);
880  /*
881  * 57710 and 57711 do not support MOVE command,
882  * so it's either ADD or DEL
883  */
884  bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
885  true : false;
886 
887  /* Reset the ramrod data buffer */
888  memset(config, 0, sizeof(*config));
889 
890  bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
891  cam_offset, add,
892  elem->cmd_data.vlan_mac.u.mac.mac, 0,
893  ETH_VLAN_FILTER_ANY_VLAN, config);
894 }
895 
896 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
897  struct bnx2x_vlan_mac_obj *o,
898  struct bnx2x_exeq_elem *elem, int rule_idx,
899  int cam_offset)
900 {
901  struct bnx2x_raw_obj *raw = &o->raw;
902  struct eth_classify_rules_ramrod_data *data =
903  (struct eth_classify_rules_ramrod_data *)(raw->rdata);
904  int rule_cnt = rule_idx + 1;
905  union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
906  int cmd = elem->cmd_data.vlan_mac.cmd;
907  bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
908  u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
909 
910  /* Reset the ramrod data buffer for the first rule */
911  if (rule_idx == 0)
912  memset(data, 0, sizeof(*data));
913 
914  /* Set a rule header */
915  bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
916  &rule_entry->vlan.header);
917 
918  DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
919  vlan);
920 
921  /* Set a VLAN itself */
922  rule_entry->vlan.vlan = cpu_to_le16(vlan);
923 
924  /* MOVE: Add a rule that will add this MAC to the target Queue */
925  if (cmd == BNX2X_VLAN_MAC_MOVE) {
926  rule_entry++;
927  rule_cnt++;
928 
929  /* Setup ramrod data */
930  bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
931  elem->cmd_data.vlan_mac.target_obj,
932  true, CLASSIFY_RULE_OPCODE_VLAN,
933  &rule_entry->vlan.header);
934 
935  /* Set a VLAN itself */
936  rule_entry->vlan.vlan = cpu_to_le16(vlan);
937  }
938 
939  /* Set the ramrod data header */
940  /* TODO: take this to the higher level in order to prevent multiple
941  writing */
942  bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
943  rule_cnt);
944 }
945 
946 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
947  struct bnx2x_vlan_mac_obj *o,
948  struct bnx2x_exeq_elem *elem,
949  int rule_idx, int cam_offset)
950 {
951  struct bnx2x_raw_obj *raw = &o->raw;
952  struct eth_classify_rules_ramrod_data *data =
953  (struct eth_classify_rules_ramrod_data *)(raw->rdata);
954  int rule_cnt = rule_idx + 1;
955  union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
956  int cmd = elem->cmd_data.vlan_mac.cmd;
957  bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
958  u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
959  u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
960 
961 
962  /* Reset the ramrod data buffer for the first rule */
963  if (rule_idx == 0)
964  memset(data, 0, sizeof(*data));
965 
966  /* Set a rule header */
967  bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
968  &rule_entry->pair.header);
969 
970  /* Set VLAN and MAC themselves */
971  rule_entry->pair.vlan = cpu_to_le16(vlan);
972  bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
973  &rule_entry->pair.mac_mid,
974  &rule_entry->pair.mac_lsb, mac);
975 
976  /* MOVE: Add a rule that will add this MAC to the target Queue */
977  if (cmd == BNX2X_VLAN_MAC_MOVE) {
978  rule_entry++;
979  rule_cnt++;
980 
981  /* Setup ramrod data */
982  bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
983  elem->cmd_data.vlan_mac.target_obj,
984  true, CLASSIFY_RULE_OPCODE_PAIR,
985  &rule_entry->pair.header);
986 
987  /* Set a VLAN itself */
988  rule_entry->pair.vlan = cpu_to_le16(vlan);
989  bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
990  &rule_entry->pair.mac_mid,
991  &rule_entry->pair.mac_lsb, mac);
992  }
993 
994  /* Set the ramrod data header */
995  /* TODO: take this to the higher level in order to prevent multiple
996  writing */
997  bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
998  rule_cnt);
999 }
1000 
1010 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1011  struct bnx2x_vlan_mac_obj *o,
1012  struct bnx2x_exeq_elem *elem,
1013  int rule_idx, int cam_offset)
1014 {
1015  struct bnx2x_raw_obj *raw = &o->raw;
1016  struct mac_configuration_cmd *config =
1017  (struct mac_configuration_cmd *)(raw->rdata);
1018  /*
1019  * 57710 and 57711 do not support MOVE command,
1020  * so it's either ADD or DEL
1021  */
1022  bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1023  true : false;
1024 
1025  /* Reset the ramrod data buffer */
1026  memset(config, 0, sizeof(*config));
1027 
1028  bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1029  cam_offset, add,
1030  elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1031  elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1032  ETH_VLAN_FILTER_CLASSIFY, config);
1033 }
1034 
1035 #define list_next_entry(pos, member) \
1036  list_entry((pos)->member.next, typeof(*(pos)), member)
1037 
1057 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1058  struct bnx2x_vlan_mac_ramrod_params *p,
1059  struct bnx2x_vlan_mac_registry_elem **ppos)
1060 {
1061  struct bnx2x_vlan_mac_registry_elem *pos;
1062  struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1063 
1064  /* If list is empty - there is nothing to do here */
1065  if (list_empty(&o->head)) {
1066  *ppos = NULL;
1067  return 0;
1068  }
1069 
1070  /* make a step... */
1071  if (*ppos == NULL)
1072  *ppos = list_first_entry(&o->head,
1073  struct bnx2x_vlan_mac_registry_elem,
1074  link);
1075  else
1076  *ppos = list_next_entry(*ppos, link);
1077 
1078  pos = *ppos;
1079 
1080  /* If it's the last step - return NULL */
1081  if (list_is_last(&pos->link, &o->head))
1082  *ppos = NULL;
1083 
1084  /* Prepare a 'user_req' */
1085  memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1086 
1087  /* Set the command */
1088  p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1089 
1090  /* Set vlan_mac_flags */
1091  p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1092 
1093  /* Set a restore bit */
1094  __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1095 
1096  return bnx2x_config_vlan_mac(bp, p);
1097 }
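
Callers treat bnx2x_vlan_mac_restore() as an iterator: *ppos carries the cursor between invocations and comes back NULL once the last registry entry has been re-issued. A hedged usage sketch (assumes p was prepared with RAMROD_COMP_WAIT set so each step completes synchronously; not taken verbatim from the driver):

    struct bnx2x_vlan_mac_registry_elem *pos = NULL;
    int rc;

    do {
            rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
            if (rc < 0)
                    return rc;      /* queueing or ramrod failure */
    } while (pos);                  /* NULL cursor => list exhausted */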
1098 
1099 /*
1100  * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1101  * pointer to an element with a specific criteria and NULL if such an element
1102  * hasn't been found.
1103  */
1104 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1105  struct bnx2x_exe_queue_obj *o,
1106  struct bnx2x_exeq_elem *elem)
1107 {
1108  struct bnx2x_exeq_elem *pos;
1109  struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1110 
1111  /* Check pending for execution commands */
1112  list_for_each_entry(pos, &o->exe_queue, link)
1113  if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1114  sizeof(*data)) &&
1115  (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1116  return pos;
1117 
1118  return NULL;
1119 }
1120 
1121 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1122  struct bnx2x_exe_queue_obj *o,
1123  struct bnx2x_exeq_elem *elem)
1124 {
1125  struct bnx2x_exeq_elem *pos;
1126  struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1127 
1128  /* Check pending for execution commands */
1129  list_for_each_entry(pos, &o->exe_queue, link)
1130  if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1131  sizeof(*data)) &&
1132  (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1133  return pos;
1134 
1135  return NULL;
1136 }
1137 
1138 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1139  struct bnx2x_exe_queue_obj *o,
1140  struct bnx2x_exeq_elem *elem)
1141 {
1142  struct bnx2x_exeq_elem *pos;
1143  struct bnx2x_vlan_mac_ramrod_data *data =
1144  &elem->cmd_data.vlan_mac.u.vlan_mac;
1145 
1146  /* Check pending for execution commands */
1147  list_for_each_entry(pos, &o->exe_queue, link)
1148  if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1149  sizeof(*data)) &&
1150  (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1151  return pos;
1152 
1153  return NULL;
1154 }
1155 
1169 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1170  union bnx2x_qable_obj *qo,
1171  struct bnx2x_exeq_elem *elem)
1172 {
1173  struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1174  struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1175  int rc;
1176 
1177  /* Check the registry */
1178  rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1179  if (rc) {
1180  DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1181  return rc;
1182  }
1183 
1184  /*
1185  * Check if there is a pending ADD command for this
1186  * MAC/VLAN/VLAN-MAC. Return an error if there is.
1187  */
1188  if (exeq->get(exeq, elem)) {
1189  DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1190  return -EEXIST;
1191  }
1192 
1193  /*
1194  * TODO: Check the pending MOVE from other objects where this
1195  * object is a destination object.
1196  */
1197 
1198  /* Consume the credit if not requested not to */
1199  if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1200  &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1201  o->get_credit(o)))
1202  return -EINVAL;
1203 
1204  return 0;
1205 }
1206 
1219 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1220  union bnx2x_qable_obj *qo,
1221  struct bnx2x_exeq_elem *elem)
1222 {
1223  struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1224  struct bnx2x_vlan_mac_registry_elem *pos;
1225  struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1226  struct bnx2x_exeq_elem query_elem;
1227 
1228  /* If this classification cannot be deleted (doesn't exist)
1229  * - return -EEXIST.
1230  */
1231  pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1232  if (!pos) {
1233  DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1234  return -EEXIST;
1235  }
1236 
1237  /*
1238  * Check if there are pending DEL or MOVE commands for this
1239  * MAC/VLAN/VLAN-MAC. Return an error if so.
1240  */
1241  memcpy(&query_elem, elem, sizeof(query_elem));
1242 
1243  /* Check for MOVE commands */
1244  query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1245  if (exeq->get(exeq, &query_elem)) {
1246  BNX2X_ERR("There is a pending MOVE command already\n");
1247  return -EINVAL;
1248  }
1249 
1250  /* Check for DEL commands */
1251  if (exeq->get(exeq, elem)) {
1252  DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1253  return -EEXIST;
1254  }
1255 
1256  /* Return the credit to the credit pool if not requested not to */
1257  if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1258  &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1259  o->put_credit(o))) {
1260  BNX2X_ERR("Failed to return a credit\n");
1261  return -EINVAL;
1262  }
1263 
1264  return 0;
1265 }
1266 
1279 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1280  union bnx2x_qable_obj *qo,
1281  struct bnx2x_exeq_elem *elem)
1282 {
1283  struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1284  struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1285  struct bnx2x_exeq_elem query_elem;
1286  struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1287  struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1288 
1289  /*
1290  * Check if we can perform this operation based on the current registry
1291  * state.
1292  */
1293  if (!src_o->check_move(bp, src_o, dest_o,
1294  &elem->cmd_data.vlan_mac.u)) {
1295  DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1296  return -EINVAL;
1297  }
1298 
1299  /*
1300  * Check if there is an already pending DEL or MOVE command for the
1301  * source object or ADD command for a destination object. Return an
1302  * error if so.
1303  */
1304  memcpy(&query_elem, elem, sizeof(query_elem));
1305 
1306  /* Check DEL on source */
1307  query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1308  if (src_exeq->get(src_exeq, &query_elem)) {
1309  BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1310  return -EINVAL;
1311  }
1312 
1313  /* Check MOVE on source */
1314  if (src_exeq->get(src_exeq, elem)) {
1315  DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1316  return -EEXIST;
1317  }
1318 
1319  /* Check ADD on destination */
1320  query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1321  if (dest_exeq->get(dest_exeq, &query_elem)) {
1322  BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1323  return -EINVAL;
1324  }
1325 
1326  /* Consume the credit if not requested not to */
1327  if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1328  &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1329  dest_o->get_credit(dest_o)))
1330  return -EINVAL;
1331 
1332  if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1333  &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1334  src_o->put_credit(src_o))) {
1335  /* return the credit taken from dest... */
1336  dest_o->put_credit(dest_o);
1337  return -EINVAL;
1338  }
1339 
1340  return 0;
1341 }
1342 
1343 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1344  union bnx2x_qable_obj *qo,
1345  struct bnx2x_exeq_elem *elem)
1346 {
1347  switch (elem->cmd_data.vlan_mac.cmd) {
1348  case BNX2X_VLAN_MAC_ADD:
1349  return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1350  case BNX2X_VLAN_MAC_DEL:
1351  return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1352  case BNX2X_VLAN_MAC_MOVE:
1353  return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1354  default:
1355  return -EINVAL;
1356  }
1357 }
1358 
1359 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1360  union bnx2x_qable_obj *qo,
1361  struct bnx2x_exeq_elem *elem)
1362 {
1363  int rc = 0;
1364 
1365  /* If consumption wasn't required, nothing to do */
1366  if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1367  &elem->cmd_data.vlan_mac.vlan_mac_flags))
1368  return 0;
1369 
1370  switch (elem->cmd_data.vlan_mac.cmd) {
1371  case BNX2X_VLAN_MAC_ADD:
1372  case BNX2X_VLAN_MAC_MOVE:
1373  rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1374  break;
1375  case BNX2X_VLAN_MAC_DEL:
1376  rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1377  break;
1378  default:
1379  return -EINVAL;
1380  }
1381 
1382  if (rc != true)
1383  return -EINVAL;
1384 
1385  return 0;
1386 }
1387 
1395 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1396  struct bnx2x_vlan_mac_obj *o)
1397 {
1398  int cnt = 5000, rc;
1399  struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1400  struct bnx2x_raw_obj *raw = &o->raw;
1401 
1402  while (cnt--) {
1403  /* Wait for the current command to complete */
1404  rc = raw->wait_comp(bp, raw);
1405  if (rc)
1406  return rc;
1407 
1408  /* Wait until there are no pending commands */
1409  if (!bnx2x_exe_queue_empty(exeq))
1410  usleep_range(1000, 1000);
1411  else
1412  return 0;
1413  }
1414 
1415  return -EBUSY;
1416 }
1417 
1427 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1428  struct bnx2x_vlan_mac_obj *o,
1429  union event_ring_elem *cqe,
1430  unsigned long *ramrod_flags)
1431 {
1432  struct bnx2x_raw_obj *r = &o->raw;
1433  int rc;
1434 
1435  /* Reset pending list */
1436  bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1437 
1438  /* Clear pending */
1439  r->clear_pending(r);
1440 
1441  /* If ramrod failed this is most likely a SW bug */
1442  if (cqe->message.error)
1443  return -EINVAL;
1444 
1445  /* Run the next bulk of pending commands if requested */
1446  if (test_bit(RAMROD_CONT, ramrod_flags)) {
1447  rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1448  if (rc < 0)
1449  return rc;
1450  }
1451 
1452  /* If there is more work to do return PENDING */
1453  if (!bnx2x_exe_queue_empty(&o->exe_queue))
1454  return 1;
1455 
1456  return 0;
1457 }
1458 
1466 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1467  union bnx2x_qable_obj *qo,
1468  struct bnx2x_exeq_elem *elem)
1469 {
1470  struct bnx2x_exeq_elem query, *pos;
1471  struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1472  struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1473 
1474  memcpy(&query, elem, sizeof(query));
1475 
1476  switch (elem->cmd_data.vlan_mac.cmd) {
1477  case BNX2X_VLAN_MAC_ADD:
1478  query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1479  break;
1480  case BNX2X_VLAN_MAC_DEL:
1481  query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1482  break;
1483  default:
1484  /* Don't handle anything other than ADD or DEL */
1485  return 0;
1486  }
1487 
1488  /* If we found the appropriate element - delete it */
1489  pos = exeq->get(exeq, &query);
1490  if (pos) {
1491 
1492  /* Return the credit of the optimized command */
1493  if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1494  &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1495  if ((query.cmd_data.vlan_mac.cmd ==
1496  BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1497  BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1498  return -EINVAL;
1499  } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1500  BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1501  return -EINVAL;
1502  }
1503  }
1504 
1505  DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1506  (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1507  "ADD" : "DEL");
1508 
1509  list_del(&pos->link);
1510  bnx2x_exe_queue_free_elem(bp, pos);
1511  return 1;
1512  }
1513 
1514  return 0;
1515 }
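
In effect a still-queued ADD and a matching DEL annihilate before anything reaches the FW; only the credit taken by the ADD has to be handed back. A hedged sketch of the scenario through the public entry point (assumes p is a prepared ramrod-params struct with neither RAMROD_EXEC nor RAMROD_CONT set, so commands are only queued):

    p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
    rc = bnx2x_config_vlan_mac(bp, &p);     /* queued; credit consumed */

    p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
    rc = bnx2x_config_vlan_mac(bp, &p);     /* optimize matches the queued
                                             * ADD: credit returned, both
                                             * elements freed, no ramrod */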
1516 
1528 static inline int bnx2x_vlan_mac_get_registry_elem(
1529  struct bnx2x *bp,
1530  struct bnx2x_vlan_mac_obj *o,
1531  struct bnx2x_exeq_elem *elem,
1532  bool restore,
1533  struct bnx2x_vlan_mac_registry_elem **re)
1534 {
1535  int cmd = elem->cmd_data.vlan_mac.cmd;
1536  struct bnx2x_vlan_mac_registry_elem *reg_elem;
1537 
1538  /* Allocate a new registry element if needed. */
1539  if (!restore &&
1540  ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1541  reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1542  if (!reg_elem)
1543  return -ENOMEM;
1544 
1545  /* Get a new CAM offset */
1546  if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1547  /*
1548  * This should never happen, because we have checked the
1549  * CAM availability in the 'validate'.
1550  */
1551  WARN_ON(1);
1552  kfree(reg_elem);
1553  return -EINVAL;
1554  }
1555 
1556  DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1557 
1558  /* Set a VLAN-MAC data */
1559  memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1560  sizeof(reg_elem->u));
1561 
1562  /* Copy the flags (needed for DEL and RESTORE flows) */
1563  reg_elem->vlan_mac_flags =
1564  elem->cmd_data.vlan_mac.vlan_mac_flags;
1565  } else /* DEL, RESTORE */
1566  reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1567 
1568  *re = reg_elem;
1569  return 0;
1570 }
1571 
1582 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1583  union bnx2x_qable_obj *qo,
1584  struct list_head *exe_chunk,
1585  unsigned long *ramrod_flags)
1586 {
1587  struct bnx2x_exeq_elem *elem;
1588  struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1589  struct bnx2x_raw_obj *r = &o->raw;
1590  int rc, idx = 0;
1591  bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1592  bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1593  struct bnx2x_vlan_mac_registry_elem *reg_elem;
1594  int cmd;
1595 
1596  /*
1597  * If DRIVER_ONLY execution is requested, cleanup a registry
1598  * and exit. Otherwise send a ramrod to FW.
1599  */
1600  if (!drv_only) {
1601  WARN_ON(r->check_pending(r));
1602 
1603  /* Set pending */
1604  r->set_pending(r);
1605 
1606  /* Fill the ramrod data */
1607  list_for_each_entry(elem, exe_chunk, link) {
1608  cmd = elem->cmd_data.vlan_mac.cmd;
1609  /*
1610  * We will add to the target object in MOVE command, so
1611  * change the object for a CAM search.
1612  */
1613  if (cmd == BNX2X_VLAN_MAC_MOVE)
1614  cam_obj = elem->cmd_data.vlan_mac.target_obj;
1615  else
1616  cam_obj = o;
1617 
1618  rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1619  elem, restore,
1620  &reg_elem);
1621  if (rc)
1622  goto error_exit;
1623 
1624  WARN_ON(!reg_elem);
1625 
1626  /* Push a new entry into the registry */
1627  if (!restore &&
1628  ((cmd == BNX2X_VLAN_MAC_ADD) ||
1629  (cmd == BNX2X_VLAN_MAC_MOVE)))
1630  list_add(&reg_elem->link, &cam_obj->head);
1631 
1632  /* Configure a single command in a ramrod data buffer */
1633  o->set_one_rule(bp, o, elem, idx,
1634  reg_elem->cam_offset);
1635 
1636  /* MOVE command consumes 2 entries in the ramrod data */
1637  if (cmd == BNX2X_VLAN_MAC_MOVE)
1638  idx += 2;
1639  else
1640  idx++;
1641  }
1642 
1643  /*
1644  * No need for an explicit memory barrier here, as we would anyway
1645  * need to ensure the ordering of writing to the SPQ element
1646  * and updating of the SPQ producer, which involves a memory
1647  * read, and we will have to put a full memory barrier there
1648  * (inside bnx2x_sp_post()).
1649  */
1650 
1651  rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1652  U64_HI(r->rdata_mapping),
1653  U64_LO(r->rdata_mapping),
1654  ETH_CONNECTION_TYPE);
1655  if (rc)
1656  goto error_exit;
1657  }
1658 
1659  /* Now, when we are done with the ramrod - clean up the registry */
1660  list_for_each_entry(elem, exe_chunk, link) {
1661  cmd = elem->cmd_data.vlan_mac.cmd;
1662  if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1663  (cmd == BNX2X_VLAN_MAC_MOVE)) {
1664  reg_elem = o->check_del(bp, o,
1665  &elem->cmd_data.vlan_mac.u);
1666 
1667  WARN_ON(!reg_elem);
1668 
1669  o->put_cam_offset(o, reg_elem->cam_offset);
1670  list_del(&reg_elem->link);
1671  kfree(reg_elem);
1672  }
1673  }
1674 
1675  if (!drv_only)
1676  return 1;
1677  else
1678  return 0;
1679 
1680 error_exit:
1681  r->clear_pending(r);
1682 
1683  /* Cleanup a registry in case of a failure */
1684  list_for_each_entry(elem, exe_chunk, link) {
1685  cmd = elem->cmd_data.vlan_mac.cmd;
1686 
1687  if (cmd == BNX2X_VLAN_MAC_MOVE)
1688  cam_obj = elem->cmd_data.vlan_mac.target_obj;
1689  else
1690  cam_obj = o;
1691 
1692  /* Delete all newly added above entries */
1693  if (!restore &&
1694  ((cmd == BNX2X_VLAN_MAC_ADD) ||
1695  (cmd == BNX2X_VLAN_MAC_MOVE))) {
1696  reg_elem = o->check_del(bp, cam_obj,
1697  &elem->cmd_data.vlan_mac.u);
1698  if (reg_elem) {
1699  list_del(&reg_elem->link);
1700  kfree(reg_elem);
1701  }
1702  }
1703  }
1704 
1705  return rc;
1706 }
1707 
1708 static inline int bnx2x_vlan_mac_push_new_cmd(
1709  struct bnx2x *bp,
1710  struct bnx2x_vlan_mac_ramrod_params *p)
1711 {
1712  struct bnx2x_exeq_elem *elem;
1713  struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1714  bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1715 
1716  /* Allocate the execution queue element */
1717  elem = bnx2x_exe_queue_alloc_elem(bp);
1718  if (!elem)
1719  return -ENOMEM;
1720 
1721  /* Set the command 'length' */
1722  switch (p->user_req.cmd) {
1723  case BNX2X_VLAN_MAC_MOVE:
1724  elem->cmd_len = 2;
1725  break;
1726  default:
1727  elem->cmd_len = 1;
1728  }
1729 
1730  /* Fill the object specific info */
1731  memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1732 
1733  /* Try to add a new command to the pending list */
1734  return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1735 }
1736 
1744 int bnx2x_config_vlan_mac(
1745  struct bnx2x *bp,
1746  struct bnx2x_vlan_mac_ramrod_params *p)
1747 {
1748  int rc = 0;
1749  struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1750  unsigned long *ramrod_flags = &p->ramrod_flags;
1751  bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1752  struct bnx2x_raw_obj *raw = &o->raw;
1753 
1754  /*
1755  * Add new elements to the execution list for commands that require it.
1756  */
1757  if (!cont) {
1758  rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1759  if (rc)
1760  return rc;
1761  }
1762 
1763  /*
1764  * If nothing further will be executed in this iteration, we want to
1765  * return PENDING if there are pending commands.
1766  */
1767  if (!bnx2x_exe_queue_empty(&o->exe_queue))
1768  rc = 1;
1769 
1770  if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1771  DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1772  raw->clear_pending(raw);
1773  }
1774 
1775  /* Execute commands if required */
1776  if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1777  test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1778  rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1779  if (rc < 0)
1780  return rc;
1781  }
1782 
1783  /*
1784  * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1785  * then the user wants to wait until the last command is done.
1786  */
1787  if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1788  /*
1789  * Wait maximum for the current exe_queue length iterations plus
1790  * one (for the current pending command).
1791  */
1792  int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1793 
1794  while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1795  max_iterations--) {
1796 
1797  /* Wait for the current command to complete */
1798  rc = raw->wait_comp(bp, raw);
1799  if (rc)
1800  return rc;
1801 
1802  /* Make a next step */
1803  rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1804  ramrod_flags);
1805  if (rc < 0)
1806  return rc;
1807  }
1808 
1809  return 0;
1810  }
1811 
1812  return rc;
1813 }
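
A typical synchronous caller (compare bnx2x_vlan_mac_del_all() just below) fills the request and sets RAMROD_COMP_WAIT so this function drives the queue to completion before returning. A minimal sketch, with mac_obj and addr assumed:

    struct bnx2x_vlan_mac_ramrod_params p;
    int rc;

    memset(&p, 0, sizeof(p));
    p.vlan_mac_obj = mac_obj;
    p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
    memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
    __set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

    rc = bnx2x_config_vlan_mac(bp, &p);     /* 0 on success, <0 on error */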
1814 
1815 
1816 
1830 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1831  struct bnx2x_vlan_mac_obj *o,
1832  unsigned long *vlan_mac_flags,
1833  unsigned long *ramrod_flags)
1834 {
1835  struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1836  int rc = 0;
1837  struct bnx2x_vlan_mac_ramrod_params p;
1838  struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1839  struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1840 
1841  /* Clear pending commands first */
1842 
1843  spin_lock_bh(&exeq->lock);
1844 
1845  list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1846  if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1847  *vlan_mac_flags) {
1848  rc = exeq->remove(bp, exeq->owner, exeq_pos);
1849  if (rc) {
1850  BNX2X_ERR("Failed to remove command\n");
1851  spin_unlock_bh(&exeq->lock);
1852  return rc;
1853  }
1854  list_del(&exeq_pos->link);
1855  }
1856  }
1857 
1858  spin_unlock_bh(&exeq->lock);
1859 
1860  /* Prepare a command request */
1861  memset(&p, 0, sizeof(p));
1862  p.vlan_mac_obj = o;
1863  p.ramrod_flags = *ramrod_flags;
1864  p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1865 
1866  /*
1867  * Add all but the last VLAN-MAC to the execution queue without actually
1868  * executing anything.
1869  */
1870  __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1871  __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1872  __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1873 
1874  list_for_each_entry(pos, &o->head, link) {
1875  if (pos->vlan_mac_flags == *vlan_mac_flags) {
1876  p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1877  memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1878  rc = bnx2x_config_vlan_mac(bp, &p);
1879  if (rc < 0) {
1880  BNX2X_ERR("Failed to add a new DEL command\n");
1881  return rc;
1882  }
1883  }
1884  }
1885 
1886  p.ramrod_flags = *ramrod_flags;
1887  __set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1888 
1889  return bnx2x_config_vlan_mac(bp, &p);
1890 }
1891 
1892 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1893  u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1894  unsigned long *pstate, bnx2x_obj_type type)
1895 {
1896  raw->func_id = func_id;
1897  raw->cid = cid;
1898  raw->cl_id = cl_id;
1899  raw->rdata = rdata;
1900  raw->rdata_mapping = rdata_mapping;
1901  raw->state = state;
1902  raw->pstate = pstate;
1903  raw->obj_type = type;
1904  raw->check_pending = bnx2x_raw_check_pending;
1905  raw->clear_pending = bnx2x_raw_clear_pending;
1906  raw->set_pending = bnx2x_raw_set_pending;
1907  raw->wait_comp = bnx2x_raw_wait;
1908 }
1909 
1910 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1911  u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1912  int state, unsigned long *pstate, bnx2x_obj_type type,
1913  struct bnx2x_credit_pool_obj *macs_pool,
1914  struct bnx2x_credit_pool_obj *vlans_pool)
1915 {
1916  INIT_LIST_HEAD(&o->head);
1917 
1918  o->macs_pool = macs_pool;
1919  o->vlans_pool = vlans_pool;
1920 
1921  o->delete_all = bnx2x_vlan_mac_del_all;
1922  o->restore = bnx2x_vlan_mac_restore;
1923  o->complete = bnx2x_complete_vlan_mac;
1924  o->wait = bnx2x_wait_vlan_mac;
1925 
1926  bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1927  state, pstate, type);
1928 }
1929 
1930 
1931 void bnx2x_init_mac_obj(struct bnx2x *bp,
1932  struct bnx2x_vlan_mac_obj *mac_obj,
1933  u8 cl_id, u32 cid, u8 func_id, void *rdata,
1934  dma_addr_t rdata_mapping, int state,
1935  unsigned long *pstate, bnx2x_obj_type type,
1936  struct bnx2x_credit_pool_obj *macs_pool)
1937 {
1938  union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1939 
1940  bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1941  rdata_mapping, state, pstate, type,
1942  macs_pool, NULL);
1943 
1944  /* CAM credit pool handling */
1945  mac_obj->get_credit = bnx2x_get_credit_mac;
1946  mac_obj->put_credit = bnx2x_put_credit_mac;
1947  mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1948  mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1949 
1950  if (CHIP_IS_E1x(bp)) {
1951  mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1952  mac_obj->check_del = bnx2x_check_mac_del;
1953  mac_obj->check_add = bnx2x_check_mac_add;
1954  mac_obj->check_move = bnx2x_check_move_always_err;
1955  mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1956 
1957  /* Exe Queue */
1958  bnx2x_exe_queue_init(bp,
1959  &mac_obj->exe_queue, 1, qable_obj,
1960  bnx2x_validate_vlan_mac,
1961  bnx2x_remove_vlan_mac,
1962  bnx2x_optimize_vlan_mac,
1963  bnx2x_execute_vlan_mac,
1964  bnx2x_exeq_get_mac);
1965  } else {
1966  mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1967  mac_obj->check_del = bnx2x_check_mac_del;
1968  mac_obj->check_add = bnx2x_check_mac_add;
1969  mac_obj->check_move = bnx2x_check_move;
1970  mac_obj->ramrod_cmd =
1971  RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1972  mac_obj->get_n_elements = bnx2x_get_n_elements;
1973 
1974  /* Exe Queue */
1975  bnx2x_exe_queue_init(bp,
1976  &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1977  qable_obj, bnx2x_validate_vlan_mac,
1978  bnx2x_remove_vlan_mac,
1979  bnx2x_optimize_vlan_mac,
1980  bnx2x_execute_vlan_mac,
1981  bnx2x_exeq_get_mac);
1982  }
1983 }
1984 
1985 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1986  struct bnx2x_vlan_mac_obj *vlan_obj,
1987  u8 cl_id, u32 cid, u8 func_id, void *rdata,
1988  dma_addr_t rdata_mapping, int state,
1989  unsigned long *pstate, bnx2x_obj_type type,
1990  struct bnx2x_credit_pool_obj *vlans_pool)
1991 {
1992  union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1993 
1994  bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1995  rdata_mapping, state, pstate, type, NULL,
1996  vlans_pool);
1997 
1998  vlan_obj->get_credit = bnx2x_get_credit_vlan;
1999  vlan_obj->put_credit = bnx2x_put_credit_vlan;
2000  vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2001  vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2002 
2003  if (CHIP_IS_E1x(bp)) {
2004  BNX2X_ERR("Do not support chips others than E2 and newer\n");
2005  BUG();
2006  } else {
2007  vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2008  vlan_obj->check_del = bnx2x_check_vlan_del;
2009  vlan_obj->check_add = bnx2x_check_vlan_add;
2010  vlan_obj->check_move = bnx2x_check_move;
2011  vlan_obj->ramrod_cmd =
2012  RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2013 
2014  /* Exe Queue */
2015  bnx2x_exe_queue_init(bp,
2016  &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2017  qable_obj, bnx2x_validate_vlan_mac,
2018  bnx2x_remove_vlan_mac,
2019  bnx2x_optimize_vlan_mac,
2020  bnx2x_execute_vlan_mac,
2021  bnx2x_exeq_get_vlan);
2022  }
2023 }
2024 
2025 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2026  struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2027  u8 cl_id, u32 cid, u8 func_id, void *rdata,
2028  dma_addr_t rdata_mapping, int state,
2029  unsigned long *pstate, bnx2x_obj_type type,
2030  struct bnx2x_credit_pool_obj *macs_pool,
2031  struct bnx2x_credit_pool_obj *vlans_pool)
2032 {
2033  union bnx2x_qable_obj *qable_obj =
2034  (union bnx2x_qable_obj *)vlan_mac_obj;
2035 
2036  bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2037  rdata_mapping, state, pstate, type,
2038  macs_pool, vlans_pool);
2039 
2040  /* CAM pool handling */
2041  vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2042  vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2043  /*
2044  * CAM offset is relevant for 57710 and 57711 chips only which have a
2045  * single CAM for both MACs and VLAN-MAC pairs. So the offset
2046  * will be taken from MACs' pool object only.
2047  */
2048  vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2049  vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2050 
2051  if (CHIP_IS_E1(bp)) {
2052  BNX2X_ERR("Do not support chips others than E2\n");
2053  BUG();
2054  } else if (CHIP_IS_E1H(bp)) {
2055  vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2056  vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2057  vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2058  vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2059  vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2060 
2061  /* Exe Queue */
2062  bnx2x_exe_queue_init(bp,
2063  &vlan_mac_obj->exe_queue, 1, qable_obj,
2064  bnx2x_validate_vlan_mac,
2065  bnx2x_remove_vlan_mac,
2066  bnx2x_optimize_vlan_mac,
2067  bnx2x_execute_vlan_mac,
2068  bnx2x_exeq_get_vlan_mac);
2069  } else {
2070  vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2071  vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2072  vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2073  vlan_mac_obj->check_move = bnx2x_check_move;
2074  vlan_mac_obj->ramrod_cmd =
2075  RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2076 
2077  /* Exe Queue */
2078  bnx2x_exe_queue_init(bp,
2079  &vlan_mac_obj->exe_queue,
2080  CLASSIFY_RULES_COUNT,
2081  qable_obj, bnx2x_validate_vlan_mac,
2082  bnx2x_remove_vlan_mac,
2083  bnx2x_optimize_vlan_mac,
2084  bnx2x_execute_vlan_mac,
2085  bnx2x_exeq_get_vlan_mac);
2086  }
2087 
2088 }
2089 
2090 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2091 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2092  struct tstorm_eth_mac_filter_config *mac_filters,
2093  u16 pf_id)
2094 {
2095  size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2096 
2097  u32 addr = BAR_TSTRORM_INTMEM +
2098  TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2099 
2100  __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2101 }
2102 
2103 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2104  struct bnx2x_rx_mode_ramrod_params *p)
2105 {
2106  /* update the bp MAC filter structure */
2107  u32 mask = (1 << p->cl_id);
2108 
2109  struct tstorm_eth_mac_filter_config *mac_filters =
2110  (struct tstorm_eth_mac_filter_config *)p->rdata;
2111 
2112  /* initial setting is drop-all */
2113  u8 drop_all_ucast = 1, drop_all_mcast = 1;
2114  u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2115  u8 unmatched_unicast = 0;
2116 
2117  /* In e1x we only take the Rx accept flags into account since Tx switching
2118  * isn't enabled. */
2119  if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2120  /* accept matched ucast */
2121  drop_all_ucast = 0;
2122 
2123  if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2124  /* accept matched mcast */
2125  drop_all_mcast = 0;
2126 
2127  if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2128  /* accept all ucast */
2129  drop_all_ucast = 0;
2130  accp_all_ucast = 1;
2131  }
2132  if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2133  /* accept all mcast */
2134  drop_all_mcast = 0;
2135  accp_all_mcast = 1;
2136  }
2137  if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2138  /* accept (all) bcast */
2139  accp_all_bcast = 1;
2140  if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2141  /* accept unmatched unicasts */
2142  unmatched_unicast = 1;
2143 
2144  mac_filters->ucast_drop_all = drop_all_ucast ?
2145  mac_filters->ucast_drop_all | mask :
2146  mac_filters->ucast_drop_all & ~mask;
2147 
2148  mac_filters->mcast_drop_all = drop_all_mcast ?
2149  mac_filters->mcast_drop_all | mask :
2150  mac_filters->mcast_drop_all & ~mask;
2151 
2152  mac_filters->ucast_accept_all = accp_all_ucast ?
2153  mac_filters->ucast_accept_all | mask :
2154  mac_filters->ucast_accept_all & ~mask;
2155 
2156  mac_filters->mcast_accept_all = accp_all_mcast ?
2157  mac_filters->mcast_accept_all | mask :
2158  mac_filters->mcast_accept_all & ~mask;
2159 
2160  mac_filters->bcast_accept_all = accp_all_bcast ?
2161  mac_filters->bcast_accept_all | mask :
2162  mac_filters->bcast_accept_all & ~mask;
2163 
2164  mac_filters->unmatched_unicast = unmatched_unicast ?
2165  mac_filters->unmatched_unicast | mask :
2166  mac_filters->unmatched_unicast & ~mask;
2167 
2168  DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2169  "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2170  mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2171  mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2172  mac_filters->bcast_accept_all);
2173 
2174  /* write the MAC filter structure */
2175  __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2176 
2177  /* The operation is completed */
2178  clear_bit(p->state, p->pstate);
2179  smp_mb__after_clear_bit();
2180 
2181  return 0;
2182 }
2183 
2184 /* Setup ramrod data */
2185 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2186  struct eth_classify_header *hdr,
2187  u8 rule_cnt)
2188 {
2189  hdr->echo = cid;
2190  hdr->rule_cnt = rule_cnt;
2191 }
2192 
2193 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2194  unsigned long accept_flags,
2195  struct eth_filter_rules_cmd *cmd,
2196  bool clear_accept_all)
2197 {
2198  u16 state;
2199 
2200  /* start with 'drop-all' */
2201  state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2202  ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2203 
2204  if (accept_flags) {
2205  if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2206  state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2207 
2208  if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2209  state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2210 
2211  if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2212  state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2213  state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2214  }
2215 
2216  if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2217  state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2218  state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2219  }
2220  if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2221  state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2222 
2223  if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2224  state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2225  state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2226  }
2227  if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2228  state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2229  }
2230 
2231  /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2232  if (clear_accept_all) {
2233  state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2234  state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2235  state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2236  state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2237  }
2238 
2239  cmd->state = cpu_to_le16(state);
2240 
2241 }
2242 
2243 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2244  struct bnx2x_rx_mode_ramrod_params *p)
2245 {
2246  struct eth_filter_rules_ramrod_data *data = p->rdata;
2247  int rc;
2248  u8 rule_idx = 0;
2249 
2250  /* Reset the ramrod data buffer */
2251  memset(data, 0, sizeof(*data));
2252 
2253  /* Setup ramrod data */
2254 
2255  /* Tx (internal switching) */
2256  if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2257  data->rules[rule_idx].client_id = p->cl_id;
2258  data->rules[rule_idx].func_id = p->func_id;
2259 
2260  data->rules[rule_idx].cmd_general_data =
2261  ETH_FILTER_RULES_CMD_TX_CMD;
2262 
2263  bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2264  &(data->rules[rule_idx++]), false);
2265  }
2266 
2267  /* Rx */
2268  if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2269  data->rules[rule_idx].client_id = p->cl_id;
2270  data->rules[rule_idx].func_id = p->func_id;
2271 
2272  data->rules[rule_idx].cmd_general_data =
2273  ETH_FILTER_RULES_CMD_RX_CMD;
2274 
2275  bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2276  &(data->rules[rule_idx++]), false);
2277  }
2278 
2279 
2280  /*
2281  * If FCoE Queue configuration has been requested configure the Rx and
2282  * internal switching modes for this queue in separate rules.
2283  *
2284  * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2285  * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2286  */
2287  if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2288  /* Tx (internal switching) */
2289  if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2290  data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2291  data->rules[rule_idx].func_id = p->func_id;
2292 
2293  data->rules[rule_idx].cmd_general_data =
2294  ETH_FILTER_RULES_CMD_TX_CMD;
2295 
2296  bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2297  &(data->rules[rule_idx++]),
2298  true);
2299  }
2300 
2301  /* Rx */
2302  if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2303  data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2304  data->rules[rule_idx].func_id = p->func_id;
2305 
2306  data->rules[rule_idx].cmd_general_data =
2307  ETH_FILTER_RULES_CMD_RX_CMD;
2308 
2309  bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2310  &(data->rules[rule_idx++]),
2311  true);
2312  }
2313  }
2314 
2315  /*
2316  * Set the ramrod header (most importantly - number of rules to
2317  * configure).
2318  */
2319  bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2320 
2321  DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2322  data->header.rule_cnt, p->rx_accept_flags,
2323  p->tx_accept_flags);
2324 
2325  /*
2326  * No need for an explicit memory barrier here: the write to the
2327  * SPQ element must be ordered with the update of the SPQ
2328  * producer, which involves a memory read, so the full memory
2329  * barrier required for that is already placed there
2330  * (inside bnx2x_sp_post()).
2331  */
2332 
2333  /* Send a ramrod */
2334  rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2335  U64_HI(p->rdata_mapping),
2336  U64_LO(p->rdata_mapping),
2337  ETH_CONNECTION_TYPE);
2338  if (rc)
2339  return rc;
2340 
2341  /* Ramrod completion is pending */
2342  return 1;
2343 }
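/* Rule-layout sketch for bnx2x_set_rx_mode_e2() (illustration only, not part
 * of the original source): with RAMROD_TX, RAMROD_RX and the FCoE rx_mode
 * flag all set, data->rules[] ends up with four entries - rules[0] Tx for
 * cl_id, rules[1] Rx for cl_id, rules[2] Tx and rules[3] Rx for the FCoE
 * client - and data->header.rule_cnt is 4.
 */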
2344 
2345 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2346  struct bnx2x_rx_mode_ramrod_params *p)
2347 {
2348  return bnx2x_state_wait(bp, p->state, p->pstate);
2349 }
2350 
2351 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2352  struct bnx2x_rx_mode_ramrod_params *p)
2353 {
2354  /* Do nothing */
2355  return 0;
2356 }
2357 
2358 int bnx2x_config_rx_mode(struct bnx2x *bp,
2359  struct bnx2x_rx_mode_ramrod_params *p)
2360 {
2361  int rc;
2362 
2363  /* Configure the new classification in the chip */
2364  rc = p->rx_mode_obj->config_rx_mode(bp, p);
2365  if (rc < 0)
2366  return rc;
2367 
2368  /* Wait for a ramrod completion if it was requested */
2369  if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2370  rc = p->rx_mode_obj->wait_comp(bp, p);
2371  if (rc)
2372  return rc;
2373  }
2374 
2375  return rc;
2376 }
2377 
2378 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2379  struct bnx2x_rx_mode_obj *o)
2380 {
2381  if (CHIP_IS_E1x(bp)) {
2382  o->wait_comp = bnx2x_empty_rx_mode_wait;
2383  o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2384  } else {
2385  o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2386  o->config_rx_mode = bnx2x_set_rx_mode_e2;
2387  }
2388 }
2389 
2390 /********************* Multicast verbs: SET, CLEAR ****************************/
2391 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2392 {
2393  return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2394 }
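/* Example (illustration only): the bin of a multicast MAC is the top byte of
 * crc32c_le() over its six address bytes, i.e. one of 256 bins; any two MACs
 * that hash to the same bin are indistinguishable in the approximate-match
 * registry, which is why a DEL can only clear bins, not individual MACs.
 */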
2395 
2396 struct bnx2x_mcast_mac_elem {
2397  struct list_head link;
2398  u8 mac[ETH_ALEN];
2399  u8 pad[2]; /* For a natural alignment of the following buffer */
2400 };
2401 
2402 struct bnx2x_pending_mcast_cmd {
2403  struct list_head link;
2404  int type; /* BNX2X_MCAST_CMD_X */
2405  union {
2406  struct list_head macs_head;
2407  u32 macs_num; /* Needed for DEL command */
2408  int next_bin; /* Needed for RESTORE flow with approx match */
2409  } data;
2410 
2411  bool done; /* set to true when the command has been handled;
2412  * practically used in 57712 handling only, where one pending
2413  * command may be handled in a few operations. Since for
2414  * other chips every operation is completed in a
2415  * single ramrod, there is no need to utilize this field.
2416  */
2417 };
2418 
2419 static int bnx2x_mcast_wait(struct bnx2x *bp,
2420  struct bnx2x_mcast_obj *o)
2421 {
2422  if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2423  o->raw.wait_comp(bp, &o->raw))
2424  return -EBUSY;
2425 
2426  return 0;
2427 }
2428 
2429 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2430  struct bnx2x_mcast_obj *o,
2431  struct bnx2x_mcast_ramrod_params *p,
2432  int cmd)
2433 {
2434  int total_sz;
2435  struct bnx2x_pending_mcast_cmd *new_cmd;
2436  struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2437  struct bnx2x_mcast_list_elem *pos;
2438  int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2439  p->mcast_list_len : 0);
2440 
2441  /* If the command is empty ("handle pending commands only"), break */
2442  if (!p->mcast_list_len)
2443  return 0;
2444 
2445  total_sz = sizeof(*new_cmd) +
2446  macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2447 
2448  /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2449  new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2450 
2451  if (!new_cmd)
2452  return -ENOMEM;
2453 
2454  DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2455  cmd, macs_list_len);
2456 
2457  INIT_LIST_HEAD(&new_cmd->data.macs_head);
2458 
2459  new_cmd->type = cmd;
2460  new_cmd->done = false;
2461 
2462  switch (cmd) {
2463  case BNX2X_MCAST_CMD_ADD:
2464  cur_mac = (struct bnx2x_mcast_mac_elem *)
2465  ((u8 *)new_cmd + sizeof(*new_cmd));
2466 
2467  /* Push the MACs of the current command into the pending command
2468  * MACs list: FIFO
2469  */
2470  list_for_each_entry(pos, &p->mcast_list, link) {
2471  memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2472  list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2473  cur_mac++;
2474  }
2475 
2476  break;
2477 
2478  case BNX2X_MCAST_CMD_DEL:
2479  new_cmd->data.macs_num = p->mcast_list_len;
2480  break;
2481 
2482  case BNX2X_MCAST_CMD_RESTORE:
2483  new_cmd->data.next_bin = 0;
2484  break;
2485 
2486  default:
2487  kfree(new_cmd);
2488  BNX2X_ERR("Unknown command: %d\n", cmd);
2489  return -EINVAL;
2490  }
2491 
2492  /* Push the new pending command to the tail of the pending list: FIFO */
2493  list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2494 
2495  o->set_sched(o);
2496 
2497  return 1;
2498 }
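/* Memory-layout note (illustration only): an ADD command is carved out of a
 * single kzalloc() chunk - the bnx2x_pending_mcast_cmd header is followed,
 * in the same allocation, by macs_list_len bnx2x_mcast_mac_elem slots:
 *
 *   [ bnx2x_pending_mcast_cmd | elem 0 | elem 1 | ... | elem N-1 ]
 *
 * so the single kfree() of the command also releases the copied MACs.
 */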
2499 
2508 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2509 {
2510  int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2511 
2512  for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2513  if (o->registry.aprox_match.vec[i])
2514  for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2515  int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2516  if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2517  vec, cur_bit)) {
2518  return cur_bit;
2519  }
2520  }
2521  inner_start = 0;
2522  }
2523 
2524  /* None found */
2525  return -1;
2526 }
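/* Scan sketch (illustration only): with BIT_VEC64_ELEM_SZ == 64, a call with
 * last == 70 starts at vec[1] bit 6; if vec[1] is zero the whole word is
 * skipped and scanning resumes at bit 0 of vec[2], returning the absolute
 * bin index (word * 64 + bit) of the first set bit, or -1 if none is set.
 */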
2527 
2535 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2536 {
2537  int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2538 
2539  if (cur_bit >= 0)
2540  BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2541 
2542  return cur_bit;
2543 }
2544 
2545 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2546 {
2547  struct bnx2x_raw_obj *raw = &o->raw;
2548  u8 rx_tx_flag = 0;
2549 
2550  if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2551  (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2552  rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2553 
2554  if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2555  (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2556  rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2557 
2558  return rx_tx_flag;
2559 }
2560 
2561 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2562  struct bnx2x_mcast_obj *o, int idx,
2563  union bnx2x_mcast_config_data *cfg_data,
2564  int cmd)
2565 {
2566  struct bnx2x_raw_obj *r = &o->raw;
2567  struct eth_multicast_rules_ramrod_data *data =
2568  (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2569  u8 func_id = r->func_id;
2570  u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2571  int bin;
2572 
2573  if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2574  rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2575 
2576  data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2577 
2578  /* Get a bin and update a bins' vector */
2579  switch (cmd) {
2580  case BNX2X_MCAST_CMD_ADD:
2581  bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2582  BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2583  break;
2584 
2585  case BNX2X_MCAST_CMD_DEL:
2586  /* If there were no more bins to clear
2587  * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2588  * clear any (0xff) bin.
2589  * See bnx2x_mcast_validate_e2() for an explanation of when this
2590  * may happen.
2591  */
2592  bin = bnx2x_mcast_clear_first_bin(o);
2593  break;
2594 
2595  case BNX2X_MCAST_CMD_RESTORE:
2596  bin = cfg_data->bin;
2597  break;
2598 
2599  default:
2600  BNX2X_ERR("Unknown command: %d\n", cmd);
2601  return;
2602  }
2603 
2604  DP(BNX2X_MSG_SP, "%s bin %d\n",
2605  ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2606  "Setting" : "Clearing"), bin);
2607 
2608  data->rules[idx].bin_id = (u8)bin;
2609  data->rules[idx].func_id = func_id;
2610  data->rules[idx].engine_id = o->engine_id;
2611 }
2612 
2623 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2624  struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2625  int *rdata_idx)
2626 {
2627  int cur_bin, cnt = *rdata_idx;
2628  union bnx2x_mcast_config_data cfg_data = {0};
2629 
2630  /* go through the registry and configure the bins from it */
2631  for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2632  cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2633 
2634  cfg_data.bin = (u8)cur_bin;
2635  o->set_one_rule(bp, o, cnt, &cfg_data,
2636  BNX2X_MCAST_CMD_RESTORE);
2637 
2638  cnt++;
2639 
2640  DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2641 
2642  /* Break if we reached the maximum number
2643  * of rules.
2644  */
2645  if (cnt >= o->max_cmd_len)
2646  break;
2647  }
2648 
2649  *rdata_idx = cnt;
2650 
2651  return cur_bin;
2652 }
2653 
2654 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2655  struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2656  int *line_idx)
2657 {
2658  struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2659  int cnt = *line_idx;
2660  union bnx2x_mcast_config_data cfg_data = {0};
2661 
2662  list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2663  link) {
2664 
2665  cfg_data.mac = &pmac_pos->mac[0];
2666  o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2667 
2668  cnt++;
2669 
2670  DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2671  pmac_pos->mac);
2672 
2673  list_del(&pmac_pos->link);
2674 
2675  /* Break if we reached the maximum number
2676  * of rules.
2677  */
2678  if (cnt >= o->max_cmd_len)
2679  break;
2680  }
2681 
2682  *line_idx = cnt;
2683 
2684  /* if no more MACs to configure - we are done */
2685  if (list_empty(&cmd_pos->data.macs_head))
2686  cmd_pos->done = true;
2687 }
2688 
2689 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2690  struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2691  int *line_idx)
2692 {
2693  int cnt = *line_idx;
2694 
2695  while (cmd_pos->data.macs_num) {
2696  o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2697 
2698  cnt++;
2699 
2700  cmd_pos->data.macs_num--;
2701 
2702  DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2703  cmd_pos->data.macs_num, cnt);
2704 
2705  /* Break if we reached the maximum
2706  * number of rules.
2707  */
2708  if (cnt >= o->max_cmd_len)
2709  break;
2710  }
2711 
2712  *line_idx = cnt;
2713 
2714  /* If we cleared all bins - we are done */
2715  if (!cmd_pos->data.macs_num)
2716  cmd_pos->done = true;
2717 }
2718 
2719 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2720  struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2721  int *line_idx)
2722 {
2723  cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2724  line_idx);
2725 
2726  if (cmd_pos->data.next_bin < 0)
2727  /* If o->hdl_restore returned -1 we are done */
2728  cmd_pos->done = true;
2729  else
2730  /* Start from the next bin next time */
2731  cmd_pos->data.next_bin++;
2732 }
2733 
2734 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2735  struct bnx2x_mcast_ramrod_params *p)
2736 {
2737  struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2738  int cnt = 0;
2739  struct bnx2x_mcast_obj *o = p->mcast_obj;
2740 
2741  list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2742  link) {
2743  switch (cmd_pos->type) {
2744  case BNX2X_MCAST_CMD_ADD:
2745  bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2746  break;
2747 
2748  case BNX2X_MCAST_CMD_DEL:
2749  bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2750  break;
2751 
2752  case BNX2X_MCAST_CMD_RESTORE:
2753  bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2754  &cnt);
2755  break;
2756 
2757  default:
2758  BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2759  return -EINVAL;
2760  }
2761 
2762  /* If the command has been completed - remove it from the list
2763  * and free the memory
2764  */
2765  if (cmd_pos->done) {
2766  list_del(&cmd_pos->link);
2767  kfree(cmd_pos);
2768  }
2769 
2770  /* Break if we reached the maximum number of rules */
2771  if (cnt >= o->max_cmd_len)
2772  break;
2773  }
2774 
2775  return cnt;
2776 }
2777 
2778 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2779  struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2780  int *line_idx)
2781 {
2782  struct bnx2x_mcast_list_elem *mlist_pos;
2783  union bnx2x_mcast_config_data cfg_data = {0};
2784  int cnt = *line_idx;
2785 
2786  list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2787  cfg_data.mac = mlist_pos->mac;
2788  o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2789 
2790  cnt++;
2791 
2792  DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2793  mlist_pos->mac);
2794  }
2795 
2796  *line_idx = cnt;
2797 }
2798 
2799 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2800  struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2801  int *line_idx)
2802 {
2803  int cnt = *line_idx, i;
2804 
2805  for (i = 0; i < p->mcast_list_len; i++) {
2806  o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2807 
2808  cnt++;
2809 
2810  DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2811  p->mcast_list_len - i - 1);
2812  }
2813 
2814  *line_idx = cnt;
2815 }
2816 
2829 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2830  struct bnx2x_mcast_ramrod_params *p, int cmd,
2831  int start_cnt)
2832 {
2833  struct bnx2x_mcast_obj *o = p->mcast_obj;
2834  int cnt = start_cnt;
2835 
2836  DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2837 
2838  switch (cmd) {
2839  case BNX2X_MCAST_CMD_ADD:
2840  bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2841  break;
2842 
2843  case BNX2X_MCAST_CMD_DEL:
2844  bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2845  break;
2846 
2847  case BNX2X_MCAST_CMD_RESTORE:
2848  o->hdl_restore(bp, o, 0, &cnt);
2849  break;
2850 
2851  default:
2852  BNX2X_ERR("Unknown command: %d\n", cmd);
2853  return -EINVAL;
2854  }
2855 
2856  /* The current command has been handled */
2857  p->mcast_list_len = 0;
2858 
2859  return cnt;
2860 }
2861 
2862 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2863  struct bnx2x_mcast_ramrod_params *p,
2864  int cmd)
2865 {
2866  struct bnx2x_mcast_obj *o = p->mcast_obj;
2867  int reg_sz = o->get_registry_size(o);
2868 
2869  switch (cmd) {
2870  /* DEL command deletes all currently configured MACs */
2871  case BNX2X_MCAST_CMD_DEL:
2872  o->set_registry_size(o, 0);
2873  /* Don't break */
2874 
2875  /* RESTORE command will restore the entire multicast configuration */
2876  case BNX2X_MCAST_CMD_RESTORE:
2877  /* Here we set the approximate amount of work to do, which in
2878  * fact may turn out to be less, as some MACs in postponed ADD
2879  * command(s) scheduled before this command may fall into
2880  * the same bin and the actual number of bins set in the
2881  * registry would be less than we estimated here. See
2882  * bnx2x_mcast_set_one_rule_e2() for further details.
2883  */
2884  p->mcast_list_len = reg_sz;
2885  break;
2886 
2887  case BNX2X_MCAST_CMD_ADD:
2888  case BNX2X_MCAST_CMD_CONT:
2889  /* Here we assume that all new MACs will fall into new bins.
2890  * However we will correct the real registry size after we
2891  * handle all pending commands.
2892  */
2893  o->set_registry_size(o, reg_sz + p->mcast_list_len);
2894  break;
2895 
2896  default:
2897  BNX2X_ERR("Unknown command: %d\n", cmd);
2898  return -EINVAL;
2899 
2900  }
2901 
2902  /* Increase the total number of MACs pending to be configured */
2903  o->total_pending_num += p->mcast_list_len;
2904 
2905  return 0;
2906 }
2907 
2908 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2909  struct bnx2x_mcast_ramrod_params *p,
2910  int old_num_bins)
2911 {
2912  struct bnx2x_mcast_obj *o = p->mcast_obj;
2913 
2914  o->set_registry_size(o, old_num_bins);
2915  o->total_pending_num -= p->mcast_list_len;
2916 }
2917 
2925 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2926  struct bnx2x_mcast_ramrod_params *p,
2927  u8 len)
2928 {
2929  struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2930  struct eth_multicast_rules_ramrod_data *data =
2931  (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2932 
2933  data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2934  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2935  data->header.rule_cnt = len;
2936 }
2937 
2949 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2950  struct bnx2x_mcast_obj *o)
2951 {
2952  int i, cnt = 0;
2953  u64 elem;
2954 
2955  for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2956  elem = o->registry.aprox_match.vec[i];
2957  for (; elem; cnt++)
2958  elem &= elem - 1;
2959  }
2960 
2961  o->set_registry_size(o, cnt);
2962 
2963  return 0;
2964 }
2965 
2966 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2967  struct bnx2x_mcast_ramrod_params *p,
2968  int cmd)
2969 {
2970  struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2971  struct bnx2x_mcast_obj *o = p->mcast_obj;
2972  struct eth_multicast_rules_ramrod_data *data =
2973  (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2974  int cnt = 0, rc;
2975 
2976  /* Reset the ramrod data buffer */
2977  memset(data, 0, sizeof(*data));
2978 
2979  cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2980 
2981  /* If there are no more pending commands - clear SCHEDULED state */
2982  if (list_empty(&o->pending_cmds_head))
2983  o->clear_sched(o);
2984 
2985  /* The below may be true iff there was enough room in ramrod
2986  * data for all pending commands and for the current
2987  * command. Otherwise the current command would have been added
2988  * to the pending commands and p->mcast_list_len would have been
2989  * zeroed.
2990  */
2991  if (p->mcast_list_len > 0)
2992  cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2993 
2994  /* We've pulled out some MACs - update the total number of
2995  * outstanding.
2996  */
2997  o->total_pending_num -= cnt;
2998 
2999  /* send a ramrod */
3000  WARN_ON(o->total_pending_num < 0);
3001  WARN_ON(cnt > o->max_cmd_len);
3002 
3003  bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3004 
3005  /* Update a registry size if there are no more pending operations.
3006  *
3007  * We don't want to change the value of the registry size if there are
3008  * pending operations because we want it to always be equal to the
3009  * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3010  * set bins after the last requested operation in order to properly
3011  * evaluate the size of the next DEL/RESTORE operation.
3012  *
3013  * Note that we update the registry itself during command(s) handling
3014  * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3015  * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3016  * with a limited amount of update commands (per MAC/bin) and we don't
3017  * know in this scope what the actual state of bins configuration is
3018  * going to be after this ramrod.
3019  */
3020  if (!o->total_pending_num)
3021  bnx2x_mcast_refresh_registry_e2(bp, o);
3022 
3023  /*
3024  * If CLEAR_ONLY was requested - don't send a ramrod and clear
3025  * RAMROD_PENDING status immediately.
3026  */
3027  if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3028  raw->clear_pending(raw);
3029  return 0;
3030  } else {
3031  /*
3032  * No need for an explicit memory barrier here: the write to the
3033  * SPQ element must be ordered with the update of the SPQ
3034  * producer, which involves a memory read, so the full memory
3035  * barrier required for that is already placed there
3036  * (inside bnx2x_sp_post()).
3037  */
3038 
3039  /* Send a ramrod */
3040  rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3041  raw->cid, U64_HI(raw->rdata_mapping),
3042  U64_LO(raw->rdata_mapping),
3043  ETH_CONNECTION_TYPE);
3044  if (rc)
3045  return rc;
3046 
3047  /* Ramrod completion is pending */
3048  return 1;
3049  }
3050 }
3051 
3052 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3053  struct bnx2x_mcast_ramrod_params *p,
3054  int cmd)
3055 {
3056  /* Mark, that there is a work to do */
3057  if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3058  p->mcast_list_len = 1;
3059 
3060  return 0;
3061 }
3062 
3063 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3064  struct bnx2x_mcast_ramrod_params *p,
3065  int old_num_bins)
3066 {
3067  /* Do nothing */
3068 }
3069 
3070 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3071 do { \
3072  (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3073 } while (0)
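/* Example (illustration only): bit 70 selects word 70 >> 5 == 2 and mask
 * 1 << (70 & 0x1f) == 0x40, i.e. mc_filter[2] |= 0x40; the MC_HASH_SIZE
 * 32-bit words together cover all 256 approximate-match bins.
 */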
3074 
3075 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3076  struct bnx2x_mcast_obj *o,
3077  struct bnx2x_mcast_ramrod_params *p,
3078  u32 *mc_filter)
3079 {
3080  struct bnx2x_mcast_list_elem *mlist_pos;
3081  int bit;
3082 
3083  list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3084  bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3085  BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3086 
3087  DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3088  mlist_pos->mac, bit);
3089 
3090  /* bookkeeping... */
3091  BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3092  bit);
3093  }
3094 }
3095 
3096 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3097  struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3098  u32 *mc_filter)
3099 {
3100  int bit;
3101 
3102  for (bit = bnx2x_mcast_get_next_bin(o, 0);
3103  bit >= 0;
3104  bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3105  BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3106  DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3107  }
3108 }
3109 
3110 /* On 57711 we write the multicast MACs' approximate match
3111  * table directly into the TSTORM's internal RAM. So we don't
3112  * really need to handle any tricks to make it work.
3113  */
3114 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3115  struct bnx2x_mcast_ramrod_params *p,
3116  int cmd)
3117 {
3118  int i;
3119  struct bnx2x_mcast_obj *o = p->mcast_obj;
3120  struct bnx2x_raw_obj *r = &o->raw;
3121 
3122  /* If CLEAR_ONLY has been requested - clear the registry
3123  * and clear a pending bit.
3124  */
3125  if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3126  u32 mc_filter[MC_HASH_SIZE] = {0};
3127 
3128  /* Set the multicast filter bits before writing it into
3129  * the internal memory.
3130  */
3131  switch (cmd) {
3132  case BNX2X_MCAST_CMD_ADD:
3133  bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3134  break;
3135 
3136  case BNX2X_MCAST_CMD_DEL:
3137  DP(BNX2X_MSG_SP,
3138  "Invalidating multicast MACs configuration\n");
3139 
3140  /* clear the registry */
3141  memset(o->registry.aprox_match.vec, 0,
3142  sizeof(o->registry.aprox_match.vec));
3143  break;
3144 
3145  case BNX2X_MCAST_CMD_RESTORE:
3146  bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3147  break;
3148 
3149  default:
3150  BNX2X_ERR("Unknown command: %d\n", cmd);
3151  return -EINVAL;
3152  }
3153 
3154  /* Set the mcast filter in the internal memory */
3155  for (i = 0; i < MC_HASH_SIZE; i++)
3156  REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3157  } else
3158  /* clear the registry */
3159  memset(o->registry.aprox_match.vec, 0,
3160  sizeof(o->registry.aprox_match.vec));
3161 
3162  /* We are done */
3163  r->clear_pending(r);
3164 
3165  return 0;
3166 }
3167 
3168 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3169  struct bnx2x_mcast_ramrod_params *p,
3170  int cmd)
3171 {
3172  struct bnx2x_mcast_obj *o = p->mcast_obj;
3173  int reg_sz = o->get_registry_size(o);
3174 
3175  switch (cmd) {
3176  /* DEL command deletes all currently configured MACs */
3177  case BNX2X_MCAST_CMD_DEL:
3178  o->set_registry_size(o, 0);
3179  /* Don't break */
3180 
3181  /* RESTORE command will restore the entire multicast configuration */
3183  p->mcast_list_len = reg_sz;
3184  DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3185  cmd, p->mcast_list_len);
3186  break;
3187 
3188  case BNX2X_MCAST_CMD_ADD:
3189  case BNX2X_MCAST_CMD_CONT:
3190  /* Multicast MACs on 57710 are configured as unicast MACs and
3191  * there is only a limited number of CAM entries for that
3192  * matter.
3193  */
3194  if (p->mcast_list_len > o->max_cmd_len) {
3195  BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3196  o->max_cmd_len);
3197  return -EINVAL;
3198  }
3199  /* Every configured MAC should be cleared if DEL command is
3200  * called. Only the last ADD command is relevant as long as
3201  * every ADD command overrides the previous configuration.
3202  */
3203  DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3204  if (p->mcast_list_len > 0)
3205  o->set_registry_size(o, p->mcast_list_len);
3206 
3207  break;
3208 
3209  default:
3210  BNX2X_ERR("Unknown command: %d\n", cmd);
3211  return -EINVAL;
3212 
3213  }
3214 
3215  /* We want to ensure that commands are executed one by one for 57710.
3216  * Therefore each non-empty command will consume o->max_cmd_len.
3217  */
3218  if (p->mcast_list_len)
3219  o->total_pending_num += o->max_cmd_len;
3220 
3221  return 0;
3222 }
3223 
3224 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3225  struct bnx2x_mcast_ramrod_params *p,
3226  int old_num_macs)
3227 {
3228  struct bnx2x_mcast_obj *o = p->mcast_obj;
3229 
3230  o->set_registry_size(o, old_num_macs);
3231 
3232  /* If the current command hasn't been handled yet and we are
3233  * here, it is meant to be dropped and we have to
3234  * update the number of outstanding MACs accordingly.
3235  */
3236  if (p->mcast_list_len)
3237  o->total_pending_num -= o->max_cmd_len;
3238 }
3239 
3240 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3241  struct bnx2x_mcast_obj *o, int idx,
3242  union bnx2x_mcast_config_data *cfg_data,
3243  int cmd)
3244 {
3245  struct bnx2x_raw_obj *r = &o->raw;
3246  struct mac_configuration_cmd *data =
3247  (struct mac_configuration_cmd *)(r->rdata);
3248 
3249  /* copy mac */
3250  if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3251  bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3252  &data->config_table[idx].middle_mac_addr,
3253  &data->config_table[idx].lsb_mac_addr,
3254  cfg_data->mac);
3255 
3256  data->config_table[idx].vlan_id = 0;
3257  data->config_table[idx].pf_id = r->func_id;
3258  data->config_table[idx].clients_bit_vector =
3259  cpu_to_le32(1 << r->cl_id);
3260 
3261  SET_FLAG(data->config_table[idx].flags,
3262  MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3263  T_ETH_MAC_COMMAND_SET);
3264  }
3265 }
3266 
3274 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3275  struct bnx2x_mcast_ramrod_params *p,
3276  u8 len)
3277 {
3278  struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3279  struct mac_configuration_cmd *data =
3280  (struct mac_configuration_cmd *)(r->rdata);
3281 
3282  u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3283  BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3284  BNX2X_MAX_MULTICAST*(1 + r->func_id));
3285 
3286  data->hdr.offset = offset;
3287  data->hdr.client_id = 0xff;
3288  data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3289  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3290  data->hdr.length = len;
3291 }
3292 
3306 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3307  struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3308  int *rdata_idx)
3309 {
3310  struct bnx2x_mcast_mac_elem *elem;
3311  int i = 0;
3312  union bnx2x_mcast_config_data cfg_data = {0};
3313 
3314  /* go through the registry and configure the MACs from it. */
3315  list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3316  cfg_data.mac = &elem->mac[0];
3317  o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3318 
3319  i++;
3320 
3321  DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3322  cfg_data.mac);
3323  }
3324 
3325  *rdata_idx = i;
3326 
3327  return -1;
3328 }
3329 
3330 
3331 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3332  struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3333 {
3334  struct bnx2x_pending_mcast_cmd *cmd_pos;
3335  struct bnx2x_mcast_mac_elem *pmac_pos;
3336  struct bnx2x_mcast_obj *o = p->mcast_obj;
3337  union bnx2x_mcast_config_data cfg_data = {0};
3338  int cnt = 0;
3339 
3340 
3341  /* If nothing to be done - return */
3342  if (list_empty(&o->pending_cmds_head))
3343  return 0;
3344 
3345  /* Handle the first command */
3346  cmd_pos = list_first_entry(&o->pending_cmds_head,
3347  struct bnx2x_pending_mcast_cmd, link);
3348 
3349  switch (cmd_pos->type) {
3350  case BNX2X_MCAST_CMD_ADD:
3351  list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3352  cfg_data.mac = &pmac_pos->mac[0];
3353  o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3354 
3355  cnt++;
3356 
3357  DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3358  pmac_pos->mac);
3359  }
3360  break;
3361 
3362  case BNX2X_MCAST_CMD_DEL:
3363  cnt = cmd_pos->data.macs_num;
3364  DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3365  break;
3366 
3367  case BNX2X_MCAST_CMD_RESTORE:
3368  o->hdl_restore(bp, o, 0, &cnt);
3369  break;
3370 
3371  default:
3372  BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3373  return -EINVAL;
3374  }
3375 
3376  list_del(&cmd_pos->link);
3377  kfree(cmd_pos);
3378 
3379  return cnt;
3380 }
3381 
3390 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3391  __le16 *fw_lo, u8 *mac)
3392 {
3393  mac[1] = ((u8 *)fw_hi)[0];
3394  mac[0] = ((u8 *)fw_hi)[1];
3395  mac[3] = ((u8 *)fw_mid)[0];
3396  mac[2] = ((u8 *)fw_mid)[1];
3397  mac[5] = ((u8 *)fw_lo)[0];
3398  mac[4] = ((u8 *)fw_lo)[1];
3399 }
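/* Byte-order example (illustration only; values are made up): for
 * fw_hi = {0x00, 0x1b}, fw_mid = {0x21, 0x3a}, fw_lo = {0x4c, 0x5d} -
 * each 16-bit word stored byte-swapped by the FW - the reconstructed
 * MAC is 1b:00:3a:21:5d:4c.
 */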
3400 
3412 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3413  struct bnx2x_mcast_obj *o)
3414 {
3415  struct bnx2x_raw_obj *raw = &o->raw;
3416  struct bnx2x_mcast_mac_elem *elem;
3417  struct mac_configuration_cmd *data =
3418  (struct mac_configuration_cmd *)(raw->rdata);
3419 
3420  /* If first entry contains a SET bit - the command was ADD,
3421  * otherwise - DEL_ALL
3422  */
3423  if (GET_FLAG(data->config_table[0].flags,
3424  MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3425  int i, len = data->hdr.length;
3426 
3427  /* Break if it was a RESTORE command */
3428  if (!list_empty(&o->registry.exact_match.macs))
3429  return 0;
3430 
3431  elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3432  if (!elem) {
3433  BNX2X_ERR("Failed to allocate registry memory\n");
3434  return -ENOMEM;
3435  }
3436 
3437  for (i = 0; i < len; i++, elem++) {
3438  bnx2x_get_fw_mac_addr(
3439  &data->config_table[i].msb_mac_addr,
3440  &data->config_table[i].middle_mac_addr,
3441  &data->config_table[i].lsb_mac_addr,
3442  elem->mac);
3443  DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3444  elem->mac);
3445  list_add_tail(&elem->link,
3446  &o->registry.exact_match.macs);
3447  }
3448  } else {
3449  elem = list_first_entry(&o->registry.exact_match.macs,
3450  struct bnx2x_mcast_mac_elem, link);
3451  DP(BNX2X_MSG_SP, "Deleting a registry\n");
3452  kfree(elem);
3453  INIT_LIST_HEAD(&o->registry.exact_match.macs);
3454  }
3455 
3456  return 0;
3457 }
3458 
3459 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3460  struct bnx2x_mcast_ramrod_params *p,
3461  int cmd)
3462 {
3463  struct bnx2x_mcast_obj *o = p->mcast_obj;
3464  struct bnx2x_raw_obj *raw = &o->raw;
3465  struct mac_configuration_cmd *data =
3466  (struct mac_configuration_cmd *)(raw->rdata);
3467  int cnt = 0, i, rc;
3468 
3469  /* Reset the ramrod data buffer */
3470  memset(data, 0, sizeof(*data));
3471 
3472  /* First set all entries as invalid */
3473  for (i = 0; i < o->max_cmd_len ; i++)
3474  SET_FLAG(data->config_table[i].flags,
3475  MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3476  T_ETH_MAC_COMMAND_INVALIDATE);
3477 
3478  /* Handle pending commands first */
3479  cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3480 
3481  /* If there are no more pending commands - clear SCHEDULED state */
3482  if (list_empty(&o->pending_cmds_head))
3483  o->clear_sched(o);
3484 
3485  /* The below may be true iff there were no pending commands */
3486  if (!cnt)
3487  cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3488 
3489  /* For 57710 every command has o->max_cmd_len length to ensure that
3490  * commands are done one at a time.
3491  */
3492  o->total_pending_num -= o->max_cmd_len;
3493 
3494  /* send a ramrod */
3495 
3496  WARN_ON(cnt > o->max_cmd_len);
3497 
3498  /* Set ramrod header (in particular, a number of entries to update) */
3499  bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3500 
3501  /* update a registry: we need the registry contents to be always up
3502  * to date in order to be able to execute a RESTORE opcode. Here
3503  * we use the fact that for 57710 we send one command at a time
3504  * hence we may take the registry update out of the command handling
3505  * and do it in a simpler way here.
3506  */
3507  rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3508  if (rc)
3509  return rc;
3510 
3511  /*
3512  * If CLEAR_ONLY was requested - don't send a ramrod and clear
3513  * RAMROD_PENDING status immediately.
3514  */
3515  if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3516  raw->clear_pending(raw);
3517  return 0;
3518  } else {
3519  /*
3520  * No need for an explicit memory barrier here: the write to the
3521  * SPQ element must be ordered with the update of the SPQ
3522  * producer, which involves a memory read, so the full memory
3523  * barrier required for that is already placed there
3524  * (inside bnx2x_sp_post()).
3525  */
3526 
3527  /* Send a ramrod */
3528  rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3529  U64_HI(raw->rdata_mapping),
3530  U64_LO(raw->rdata_mapping),
3531  ETH_CONNECTION_TYPE);
3532  if (rc)
3533  return rc;
3534 
3535  /* Ramrod completion is pending */
3536  return 1;
3537  }
3538 
3539 }
3540 
3541 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3542 {
3543  return o->registry.exact_match.num_macs_set;
3544 }
3545 
3546 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3547 {
3548  return o->registry.aprox_match.num_bins_set;
3549 }
3550 
3551 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3552  int n)
3553 {
3554  o->registry.exact_match.num_macs_set = n;
3555 }
3556 
3557 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3558  int n)
3559 {
3560  o->registry.aprox_match.num_bins_set = n;
3561 }
3562 
3563 int bnx2x_config_mcast(struct bnx2x *bp,
3564  struct bnx2x_mcast_ramrod_params *p,
3565  int cmd)
3566 {
3567  struct bnx2x_mcast_obj *o = p->mcast_obj;
3568  struct bnx2x_raw_obj *r = &o->raw;
3569  int rc = 0, old_reg_size;
3570 
3571  /* This is needed to recover the number of currently configured mcast MACs
3572  * in case of failure.
3573  */
3574  old_reg_size = o->get_registry_size(o);
3575 
3576  /* Do some calculations and checks */
3577  rc = o->validate(bp, p, cmd);
3578  if (rc)
3579  return rc;
3580 
3581  /* Return if there is no work to do */
3582  if ((!p->mcast_list_len) && (!o->check_sched(o)))
3583  return 0;
3584 
3585  DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3586  o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3587 
3588  /* Enqueue the current command to the pending list if we can't complete
3589  * it in the current iteration
3590  */
3591  if (r->check_pending(r) ||
3592  ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3593  rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3594  if (rc < 0)
3595  goto error_exit1;
3596 
3597  /* As long as the current command is in a command list we
3598  * don't need to handle it separately.
3599  */
3600  p->mcast_list_len = 0;
3601  }
3602 
3603  if (!r->check_pending(r)) {
3604 
3605  /* Set 'pending' state */
3606  r->set_pending(r);
3607 
3608  /* Configure the new classification in the chip */
3609  rc = o->config_mcast(bp, p, cmd);
3610  if (rc < 0)
3611  goto error_exit2;
3612 
3613  /* Wait for a ramrod completion if it was requested */
3614  if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3615  rc = o->wait_comp(bp, o);
3616  }
3617 
3618  return rc;
3619 
3620 error_exit2:
3621  r->clear_pending(r);
3622 
3623 error_exit1:
3624  o->revert(bp, p, old_reg_size);
3625 
3626  return rc;
3627 }
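/* Hypothetical caller sketch (illustration only; the setup names are the
 * caller's, not defined in this file):
 *
 *   struct bnx2x_mcast_ramrod_params rparam = {0};
 *   rparam.mcast_obj = &bp->mcast_obj;
 *   __set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
 *   rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 *
 * A negative rc means the command was neither enqueued nor sent and the
 * registry size was reverted; with RAMROD_COMP_WAIT set, rc == 0 means the
 * ramrod completed.
 */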
3628 
3629 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3630 {
3631  smp_mb__before_clear_bit();
3632  clear_bit(o->sched_state, o->raw.pstate);
3633  smp_mb__after_clear_bit();
3634 }
3635 
3636 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3637 {
3638  smp_mb__before_clear_bit();
3639  set_bit(o->sched_state, o->raw.pstate);
3640  smp_mb__after_clear_bit();
3641 }
3642 
3643 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3644 {
3645  return !!test_bit(o->sched_state, o->raw.pstate);
3646 }
3647 
3648 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3649 {
3650  return o->raw.check_pending(&o->raw) || o->check_sched(o);
3651 }
3652 
3653 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3654  struct bnx2x_mcast_obj *mcast_obj,
3655  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3656  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3657  int state, unsigned long *pstate, bnx2x_obj_type type)
3658 {
3659  memset(mcast_obj, 0, sizeof(*mcast_obj));
3660 
3661  bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3662  rdata, rdata_mapping, state, pstate, type);
3663 
3664  mcast_obj->engine_id = engine_id;
3665 
3666  INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3667 
3668  mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3669  mcast_obj->check_sched = bnx2x_mcast_check_sched;
3670  mcast_obj->set_sched = bnx2x_mcast_set_sched;
3671  mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3672 
3673  if (CHIP_IS_E1(bp)) {
3674  mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3675  mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3676  mcast_obj->hdl_restore =
3677  bnx2x_mcast_handle_restore_cmd_e1;
3678  mcast_obj->check_pending = bnx2x_mcast_check_pending;
3679 
3680  if (CHIP_REV_IS_SLOW(bp))
3681  mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3682  else
3683  mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3684 
3685  mcast_obj->wait_comp = bnx2x_mcast_wait;
3686  mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3687  mcast_obj->validate = bnx2x_mcast_validate_e1;
3688  mcast_obj->revert = bnx2x_mcast_revert_e1;
3689  mcast_obj->get_registry_size =
3690  bnx2x_mcast_get_registry_size_exact;
3691  mcast_obj->set_registry_size =
3692  bnx2x_mcast_set_registry_size_exact;
3693 
3694  /* 57710 is the only chip that uses the exact match for mcast
3695  * at the moment.
3696  */
3697  INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3698 
3699  } else if (CHIP_IS_E1H(bp)) {
3700  mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3701  mcast_obj->enqueue_cmd = NULL;
3702  mcast_obj->hdl_restore = NULL;
3703  mcast_obj->check_pending = bnx2x_mcast_check_pending;
3704 
3705  /* 57711 doesn't send a ramrod, so it has unlimited credit
3706  * for one command.
3707  */
3708  mcast_obj->max_cmd_len = -1;
3709  mcast_obj->wait_comp = bnx2x_mcast_wait;
3710  mcast_obj->set_one_rule = NULL;
3711  mcast_obj->validate = bnx2x_mcast_validate_e1h;
3712  mcast_obj->revert = bnx2x_mcast_revert_e1h;
3713  mcast_obj->get_registry_size =
3714  bnx2x_mcast_get_registry_size_aprox;
3715  mcast_obj->set_registry_size =
3716  bnx2x_mcast_set_registry_size_aprox;
3717  } else {
3718  mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3719  mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3720  mcast_obj->hdl_restore =
3721  bnx2x_mcast_handle_restore_cmd_e2;
3722  mcast_obj->check_pending = bnx2x_mcast_check_pending;
3723  /* TODO: There should be a proper HSI define for this number!!!
3724  */
3725  mcast_obj->max_cmd_len = 16;
3726  mcast_obj->wait_comp = bnx2x_mcast_wait;
3727  mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3728  mcast_obj->validate = bnx2x_mcast_validate_e2;
3729  mcast_obj->revert = bnx2x_mcast_revert_e2;
3730  mcast_obj->get_registry_size =
3731  bnx2x_mcast_get_registry_size_aprox;
3732  mcast_obj->set_registry_size =
3733  bnx2x_mcast_set_registry_size_aprox;
3734  }
3735 }
3736 
3737 /*************************** Credit handling **********************************/
3738 
3749 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3750 {
3751  int c, old;
3752 
3753  c = atomic_read(v);
3754  for (;;) {
3755  if (unlikely(c + a >= u))
3756  return false;
3757 
3758  old = atomic_cmpxchg((v), c, c + a);
3759  if (likely(old == c))
3760  break;
3761  c = old;
3762  }
3763 
3764  return true;
3765 }
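/* Worked example (illustration only): for a pool of size 8 with credit == 7,
 * __atomic_add_ifless(&credit, 2, 9) fails since 7 + 2 >= 9, while adding 1
 * succeeds; on contention the cmpxchg loop re-reads the counter and retries,
 * so the bound check and the update are atomic as a pair.
 */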
3766 
3777 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3778 {
3779  int c, old;
3780 
3781  c = atomic_read(v);
3782  for (;;) {
3783  if (unlikely(c - a < u))
3784  return false;
3785 
3786  old = atomic_cmpxchg((v), c, c - a);
3787  if (likely(old == c))
3788  break;
3789  c = old;
3790  }
3791 
3792  return true;
3793 }
3794 
3795 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3796 {
3797  bool rc;
3798 
3799  smp_mb();
3800  rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3801  smp_mb();
3802 
3803  return rc;
3804 }
3805 
3806 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3807 {
3808  bool rc;
3809 
3810  smp_mb();
3811 
3812  /* Don't allow a refill if credit + cnt > pool_sz */
3813  rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3814 
3815  smp_mb();
3816 
3817  return rc;
3818 }
3819 
3820 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3821 {
3822  int cur_credit;
3823 
3824  smp_mb();
3825  cur_credit = atomic_read(&o->credit);
3826 
3827  return cur_credit;
3828 }
3829 
3830 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3831  int cnt)
3832 {
3833  return true;
3834 }
3835 
3836 
3837 static bool bnx2x_credit_pool_get_entry(
3838  struct bnx2x_credit_pool_obj *o,
3839  int *offset)
3840 {
3841  int idx, vec, i;
3842 
3843  *offset = -1;
3844 
3845  /* Find "internal cam-offset" then add to base for this object... */
3846  for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3847 
3848  /* Skip the current vector if there are no free entries in it */
3849  if (!o->pool_mirror[vec])
3850  continue;
3851 
3852  /* If we've got here we are going to find a free entry */
3853  for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3854  i < BIT_VEC64_ELEM_SZ; idx++, i++)
3855 
3856  if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3857  /* Got one!! */
3858  BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3859  *offset = o->base_pool_offset + idx;
3860  return true;
3861  }
3862  }
3863 
3864  return false;
3865 }
3866 
3867 static bool bnx2x_credit_pool_put_entry(
3868  struct bnx2x_credit_pool_obj *o,
3869  int offset)
3870 {
3871  if (offset < o->base_pool_offset)
3872  return false;
3873 
3874  offset -= o->base_pool_offset;
3875 
3876  if (offset >= o->pool_sz)
3877  return false;
3878 
3879  /* Return the entry to the pool */
3880  BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3881 
3882  return true;
3883 }
3884 
3885 static bool bnx2x_credit_pool_put_entry_always_true(
3886  struct bnx2x_credit_pool_obj *o,
3887  int offset)
3888 {
3889  return true;
3890 }
3891 
3892 static bool bnx2x_credit_pool_get_entry_always_true(
3893  struct bnx2x_credit_pool_obj *o,
3894  int *offset)
3895 {
3896  *offset = -1;
3897  return true;
3898 }
3910 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3911  int base, int credit)
3912 {
3913  /* Zero the object first */
3914  memset(p, 0, sizeof(*p));
3915 
3916  /* Set the table to all 1s */
3917  memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3918 
3919  /* Init a pool as full */
3920  atomic_set(&p->credit, credit);
3921 
3922  /* The total pool size */
3923  p->pool_sz = credit;
3924 
3925  p->base_pool_offset = base;
3926 
3927  /* Commit the change */
3928  smp_mb();
3929 
3930  p->check = bnx2x_credit_pool_check;
3931 
3932  /* if pool credit is negative - disable the checks */
3933  if (credit >= 0) {
3934  p->put = bnx2x_credit_pool_put;
3935  p->get = bnx2x_credit_pool_get;
3936  p->put_entry = bnx2x_credit_pool_put_entry;
3937  p->get_entry = bnx2x_credit_pool_get_entry;
3938  } else {
3939  p->put = bnx2x_credit_pool_always_true;
3940  p->get = bnx2x_credit_pool_always_true;
3941  p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3942  p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3943  }
3944 
3945  /* If base is negative - disable entries handling */
3946  if (base < 0) {
3947  p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3948  p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3949  }
3950 }
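/* Usage sketch (illustration only): bnx2x_init_credit_pool(p, -1, 32) yields
 * a 32-credit pool with get/put accounting but no per-entry CAM offset
 * tracking (base < 0), while bnx2x_init_credit_pool(p, 0, -1) disables the
 * credit checks entirely - the two degenerate modes the chip-specific init
 * routines below rely on.
 */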
3951 
3952 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3953  struct bnx2x_credit_pool_obj *p, u8 func_id,
3954  u8 func_num)
3955 {
3956 /* TODO: this will be defined in consts as well... */
3957 #define BNX2X_CAM_SIZE_EMUL 5
3958 
3959  int cam_sz;
3960 
3961  if (CHIP_IS_E1(bp)) {
3962  /* In E1, Multicast is saved in cam... */
3963  if (!CHIP_REV_IS_SLOW(bp))
3964  cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3965  else
3966  cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3967 
3968  bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3969 
3970  } else if (CHIP_IS_E1H(bp)) {
3971  /* CAM credit is equally divided between all active functions
3972  * on the PORT.
3973  */
3974  if ((func_num > 0)) {
3975  if (!CHIP_REV_IS_SLOW(bp))
3976  cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3977  else
3978  cam_sz = BNX2X_CAM_SIZE_EMUL;
3979  bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3980  } else {
3981  /* this should never happen! Block MAC operations. */
3982  bnx2x_init_credit_pool(p, 0, 0);
3983  }
3984 
3985  } else {
3986 
3987  /*
3988  * CAM credit is equally divided between all active functions
3989  * on the PATH.
3990  */
3991  if ((func_num > 0)) {
3992  if (!CHIP_REV_IS_SLOW(bp))
3993  cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3994  else
3995  cam_sz = BNX2X_CAM_SIZE_EMUL;
3996 
3997  /*
3998  * No need for CAM entries handling for 57712 and
3999  * newer.
4000  */
4001  bnx2x_init_credit_pool(p, -1, cam_sz);
4002  } else {
4003  /* this should never happen! Block MAC operations. */
4004  bnx2x_init_credit_pool(p, 0, 0);
4005  }
4006 
4007  }
4008 }
4009 
4010 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4011  struct bnx2x_credit_pool_obj *p,
4012  u8 func_id,
4013  u8 func_num)
4014 {
4015  if (CHIP_IS_E1x(bp)) {
4016  /*
4017  * There is no VLAN credit in HW on 57710 and 57711; only
4018  * MAC / MAC-VLAN can be set.
4019  */
4020  bnx2x_init_credit_pool(p, 0, -1);
4021  } else {
4022  /*
4023  * CAM credit is equally divided between all active functions
4024  * on the PATH.
4025  */
4026  if (func_num > 0) {
4027  int credit = MAX_VLAN_CREDIT_E2 / func_num;
4028  bnx2x_init_credit_pool(p, func_id * credit, credit);
4029  } else
4030  /* this should never happen! Block VLAN operations. */
4031  bnx2x_init_credit_pool(p, 0, 0);
4032  }
4033 }
4034 
4035 /****************** RSS Configuration ******************/
4044 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4045  struct bnx2x_config_rss_params *p)
4046 {
4047  int i;
4048 
4049  DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4050  DP(BNX2X_MSG_SP, "0x0000: ");
4051  for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4052  DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4053 
4054  /* Print 4 bytes in a line */
4055  if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4056  (((i + 1) & 0x3) == 0)) {
4057  DP_CONT(BNX2X_MSG_SP, "\n");
4058  DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4059  }
4060  }
4061 
4062  DP_CONT(BNX2X_MSG_SP, "\n");
4063 }
4064 
4073 static int bnx2x_setup_rss(struct bnx2x *bp,
4074  struct bnx2x_config_rss_params *p)
4075 {
4076  struct bnx2x_rss_config_obj *o = p->rss_obj;
4077  struct bnx2x_raw_obj *r = &o->raw;
4078  struct eth_rss_update_ramrod_data *data =
4079  (struct eth_rss_update_ramrod_data *)(r->rdata);
4080  u8 rss_mode = 0;
4081  int rc;
4082 
4083  memset(data, 0, sizeof(*data));
4084 
4085  DP(BNX2X_MSG_SP, "Configuring RSS\n");
4086 
4087  /* Set an echo field */
4088  data->echo = (r->cid & BNX2X_SWCID_MASK) |
4089  (r->state << BNX2X_SWCID_SHIFT);
4090 
4091  /* RSS mode */
4093  rss_mode = ETH_RSS_MODE_DISABLED;
4094  else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4095  rss_mode = ETH_RSS_MODE_REGULAR;
4096 
4097  data->rss_mode = rss_mode;
4098 
4099  DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4100 
4101  /* RSS capabilities */
4102  if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4103  data->capabilities |=
4104  ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4105 
4106  if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4107  data->capabilities |=
4108  ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4109 
4110  if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4111  data->capabilities |=
4112  ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4113 
4114  if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4115  data->capabilities |=
4116  ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4117 
4118  if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4119  data->capabilities |=
4120  ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4121 
4122  if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4123  data->capabilities |=
4124  ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4125 
4126  /* Hashing mask */
4127  data->rss_result_mask = p->rss_result_mask;
4128 
4129  /* RSS engine ID */
4130  data->rss_engine_id = o->engine_id;
4131 
4132  DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4133 
4134  /* Indirection table */
4135  memcpy(data->indirection_table, p->ind_table,
4136  T_ETH_INDIRECTION_TABLE_SIZE);
4137 
4138  /* Remember the last configuration */
4139  memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4140 
4141  /* Print the indirection table */
4142  if (netif_msg_ifup(bp))
4143  bnx2x_debug_print_ind_table(bp, p);
4144 
4145  /* RSS keys */
4146  if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4147  memcpy(&data->rss_key[0], &p->rss_key[0],
4148  sizeof(data->rss_key));
4149  data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4150  }
4151 
4152  /*
4153  * No need for an explicit memory barrier here: the write to the
4154  * SPQ element must be ordered with the update of the SPQ
4155  * producer, which involves a memory read, so the full memory
4156  * barrier required for that is already placed there
4157  * (inside bnx2x_sp_post()).
4158  */
4159 
4160  /* Send a ramrod */
4161  rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4162  U64_HI(r->rdata_mapping),
4163  U64_LO(r->rdata_mapping),
4164  ETH_CONNECTION_TYPE);
4165 
4166  if (rc < 0)
4167  return rc;
4168 
4169  return 1;
4170 }
4171 
4172 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4173  u8 *ind_table)
4174 {
4175  memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4176 }
4177 
4178 int bnx2x_config_rss(struct bnx2x *bp,
4179  struct bnx2x_config_rss_params *p)
4180 {
4181  int rc;
4182  struct bnx2x_rss_config_obj *o = p->rss_obj;
4183  struct bnx2x_raw_obj *r = &o->raw;
4184 
4185  /* Do nothing if only driver cleanup was requested */
4186  if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4187  return 0;
4188 
4189  r->set_pending(r);
4190 
4191  rc = o->config_rss(bp, p);
4192  if (rc < 0) {
4193  r->clear_pending(r);
4194  return rc;
4195  }
4196 
4197  if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4198  rc = r->wait_comp(bp, r);
4199 
4200  return rc;
4201 }
4202 
4203 
4204 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4205  struct bnx2x_rss_config_obj *rss_obj,
4206  u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4207  void *rdata, dma_addr_t rdata_mapping,
4208  int state, unsigned long *pstate,
4209  bnx2x_obj_type type)
4210 {
4211  bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4212  rdata_mapping, state, pstate, type);
4213 
4214  rss_obj->engine_id = engine_id;
4215  rss_obj->config_rss = bnx2x_setup_rss;
4216 }
4217 
4218 /********************** Queue state object ***********************************/
4219 
4232 int bnx2x_queue_state_change(struct bnx2x *bp,
4233  struct bnx2x_queue_state_params *params)
4234 {
4235  struct bnx2x_queue_sp_obj *o = params->q_obj;
4236  int rc, pending_bit;
4237  unsigned long *pending = &o->pending;
4238 
4239  /* Check that the requested transition is legal */
4240  if (o->check_transition(bp, o, params))
4241  return -EINVAL;
4242 
4243  /* Set "pending" bit */
4244  pending_bit = o->set_pending(o, params);
4245 
4246  /* Don't send a command if only driver cleanup was requested */
4247  if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4248  o->complete_cmd(bp, o, pending_bit);
4249  else {
4250  /* Send a ramrod */
4251  rc = o->send_cmd(bp, params);
4252  if (rc) {
4253  o->next_state = BNX2X_Q_STATE_MAX;
4254  clear_bit(pending_bit, pending);
4255  smp_mb__after_clear_bit();
4256  return rc;
4257  }
4258 
4259  if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4260  rc = o->wait_comp(bp, o, pending_bit);
4261  if (rc)
4262  return rc;
4263 
4264  return 0;
4265  }
4266  }
4267 
4268  return !!test_bit(pending_bit, pending);
4269 }
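/* Return-value sketch (illustration only): with RAMROD_COMP_WAIT set the
 * function blocks in wait_comp() and returns 0 on completion; without it, a
 * successfully sent command returns !!test_bit(pending_bit, ...) == 1, i.e.
 * "completion still pending", and the caller handles the completion later
 * (e.g. from the slow-path event flow that invokes o->complete_cmd()).
 */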
4270 
4271 
4272 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4273  struct bnx2x_queue_state_params *params)
4274 {
4275  enum bnx2x_queue_cmd cmd = params->cmd, bit;
4276 
4277  /* ACTIVATE and DEACTIVATE commands are implemented on top of
4278  * UPDATE command.
4279  */
4280  if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4281  (cmd == BNX2X_Q_CMD_DEACTIVATE))
4282  bit = BNX2X_Q_CMD_UPDATE;
4283  else
4284  bit = cmd;
4285 
4286  set_bit(bit, &obj->pending);
4287  return bit;
4288 }
4289 
4290 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4291  struct bnx2x_queue_sp_obj *o,
4292  enum bnx2x_queue_cmd cmd)
4293 {
4294  return bnx2x_state_wait(bp, cmd, &o->pending);
4295 }
4296 
4306 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4307  struct bnx2x_queue_sp_obj *o,
4308  enum bnx2x_queue_cmd cmd)
4309 {
4310  unsigned long cur_pending = o->pending;
4311 
4312  if (!test_and_clear_bit(cmd, &cur_pending)) {
4313  BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4314  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4315  o->state, cur_pending, o->next_state);
4316  return -EINVAL;
4317  }
4318 
4319  if (o->next_tx_only >= o->max_cos)
4320  /* >= because tx only must always be smaller than cos since the
4321  * primary connection supports COS 0
4322  */
4323  BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4324  o->next_tx_only, o->max_cos);
4325 
4326  DP(BNX2X_MSG_SP,
4327  "Completing command %d for queue %d, setting state to %d\n",
4328  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4329 
4330  if (o->next_tx_only) /* print num tx-only if any exist */
4331  DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4332  o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4333 
4334  o->state = o->next_state;
4335  o->num_tx_only = o->next_tx_only;
4336  o->next_state = BNX2X_Q_STATE_MAX;
4337 
4338  /* It's important that o->state and o->next_state are
4339  * updated before o->pending.
4340  */
4341  wmb();
4342 
4343  clear_bit(cmd, &o->pending);
4344  smp_mb__after_clear_bit();
4345 
4346  return 0;
4347 }
4348 
4349 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4350  struct bnx2x_queue_state_params *cmd_params,
4351  struct client_init_ramrod_data *data)
4352 {
4353  struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4354 
4355  /* Rx data */
4356 
4357  /* IPv6 TPA supported for E2 and above only */
4358  data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4359  CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4360 }
4361 
4362 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4363  struct bnx2x_queue_sp_obj *o,
4364  struct bnx2x_general_setup_params *params,
4365  struct client_init_general_data *gen_data,
4366  unsigned long *flags)
4367 {
4368  gen_data->client_id = o->cl_id;
4369 
4370  if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4371  gen_data->statistics_counter_id =
4372  params->stat_id;
4373  gen_data->statistics_en_flg = 1;
4374  gen_data->statistics_zero_flg =
4375  test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4376  } else
4377  gen_data->statistics_counter_id =
4378  DISABLE_STATISTIC_COUNTER_ID_VALUE;
4379 
4380  gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4381  gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4382  gen_data->sp_client_id = params->spcl_id;
4383  gen_data->mtu = cpu_to_le16(params->mtu);
4384  gen_data->func_id = o->func_id;
4385 
4386 
4387  gen_data->cos = params->cos;
4388 
4389  gen_data->traffic_type =
4390  test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4391  LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4392 
4393  DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4394  gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4395 }
4396 
4397 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4398  struct bnx2x_txq_setup_params *params,
4399  struct client_init_tx_data *tx_data,
4400  unsigned long *flags)
4401 {
4402  tx_data->enforce_security_flg =
4403  test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4404  tx_data->default_vlan =
4405  cpu_to_le16(params->default_vlan);
4406  tx_data->default_vlan_flg =
4407  test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4408  tx_data->tx_switching_flg =
4409  test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4410  tx_data->anti_spoofing_flg =
4411  test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4412  tx_data->force_default_pri_flg =
4413  test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4414 
4415  tx_data->tx_status_block_id = params->fw_sb_id;
4416  tx_data->tx_sb_index_number = params->sb_cq_index;
4417  tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4418 
4419  tx_data->tx_bd_page_base.lo =
4420  cpu_to_le32(U64_LO(params->dscr_map));
4421  tx_data->tx_bd_page_base.hi =
4422  cpu_to_le32(U64_HI(params->dscr_map));
4423 
4424  /* Don't configure any Tx switching mode during queue SETUP */
4425  tx_data->state = 0;
4426 }
4427 
4428 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4429  struct rxq_pause_params *params,
4430  struct client_init_rx_data *rx_data)
4431 {
4432  /* flow control data */
4433  rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4434  rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4435  rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4436  rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4437  rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4438  rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4439  rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4440 }
4441 
4442 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4443  struct bnx2x_rxq_setup_params *params,
4444  struct client_init_rx_data *rx_data,
4445  unsigned long *flags)
4446 {
4447  rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4448  CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4449  rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4450  CLIENT_INIT_RX_DATA_TPA_MODE;
4451  rx_data->vmqueue_mode_en_flg = 0;
4452 
4453  rx_data->cache_line_alignment_log_size =
4454  params->cache_line_log;
4455  rx_data->enable_dynamic_hc =
4456  test_bit(BNX2X_Q_FLG_DHC, flags);
4457  rx_data->max_sges_for_packet = params->max_sges_pkt;
4458  rx_data->client_qzone_id = params->cl_qzone_id;
4459  rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4460 
4461  /* Always start in DROP_ALL mode */
4462  rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4463  CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4464 
4465  /* We don't set drop flags */
4466  rx_data->drop_ip_cs_err_flg = 0;
4467  rx_data->drop_tcp_cs_err_flg = 0;
4468  rx_data->drop_ttl0_flg = 0;
4469  rx_data->drop_udp_cs_err_flg = 0;
4470  rx_data->inner_vlan_removal_enable_flg =
4471  test_bit(BNX2X_Q_FLG_VLAN, flags);
4472  rx_data->outer_vlan_removal_enable_flg =
4473  test_bit(BNX2X_Q_FLG_OV, flags);
4474  rx_data->status_block_id = params->fw_sb_id;
4475  rx_data->rx_sb_index_number = params->sb_cq_index;
4476  rx_data->max_tpa_queues = params->max_tpa_queues;
4477  rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4478  rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4479  rx_data->bd_page_base.lo =
4480  cpu_to_le32(U64_LO(params->dscr_map));
4481  rx_data->bd_page_base.hi =
4482  cpu_to_le32(U64_HI(params->dscr_map));
4483  rx_data->sge_page_base.lo =
4484  cpu_to_le32(U64_LO(params->sge_map));
4485  rx_data->sge_page_base.hi =
4486  cpu_to_le32(U64_HI(params->sge_map));
4487  rx_data->cqe_page_base.lo =
4488  cpu_to_le32(U64_LO(params->rcq_map));
4489  rx_data->cqe_page_base.hi =
4490  cpu_to_le32(U64_HI(params->rcq_map));
4491  rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4492 
4493  if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4494  rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4495  rx_data->is_approx_mcast = 1;
4496  }
4497 
4498  rx_data->rss_engine_id = params->rss_engine_id;
4499 
4500  /* silent vlan removal */
4501  rx_data->silent_vlan_removal_flg =
4502  test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4503  rx_data->silent_vlan_value =
4504  cpu_to_le16(params->silent_removal_value);
4505  rx_data->silent_vlan_mask =
4506  cpu_to_le16(params->silent_removal_mask);
4507 
4508 }
4509 
4510 /* initialize the general, tx and rx parts of a queue object */
4511 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4512  struct bnx2x_queue_state_params *cmd_params,
4513  struct client_init_ramrod_data *data)
4514 {
4515  bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4516  &cmd_params->params.setup.gen_params,
4517  &data->general,
4518  &cmd_params->params.setup.flags);
4519 
4520  bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4521  &cmd_params->params.setup.txq_params,
4522  &data->tx,
4523  &cmd_params->params.setup.flags);
4524 
4525  bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4526  &cmd_params->params.setup.rxq_params,
4527  &data->rx,
4528  &cmd_params->params.setup.flags);
4529 
4530  bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4531  &cmd_params->params.setup.pause_params,
4532  &data->rx);
4533 }
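The fill helpers above are driven entirely by the bit flags in params.setup.flags. A sketch of how a caller might request statistics, an active queue, TPA and inner VLAN stripping (hypothetical helper; the flag names are the ones tested above):

    static void example_setup_flags(struct bnx2x_queue_state_params *q_params,
                                    u8 stat_id)
    {
            unsigned long *flags = &q_params->params.setup.flags;

            __set_bit(BNX2X_Q_FLG_ACTIVE, flags); /* gen_data->activate_flg */
            __set_bit(BNX2X_Q_FLG_STATS, flags);  /* statistics_en_flg + id */
            __set_bit(BNX2X_Q_FLG_TPA, flags);    /* TPA_EN_IPV4 in rx_data */
            __set_bit(BNX2X_Q_FLG_VLAN, flags);   /* inner VLAN stripping */

            q_params->params.setup.gen_params.stat_id = stat_id;
    }
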
4534 
4535 /* initialize the general and tx parts of a tx-only queue object */
4536 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4537  struct bnx2x_queue_state_params *cmd_params,
4538  struct tx_queue_init_ramrod_data *data)
4539 {
4540  bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4541  &cmd_params->params.tx_only.gen_params,
4542  &data->general,
4543  &cmd_params->params.tx_only.flags);
4544 
4545  bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4546  &cmd_params->params.tx_only.txq_params,
4547  &data->tx,
4548  &cmd_params->params.tx_only.flags);
4549 
4550  DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4551  cmd_params->q_obj->cids[0],
4552  data->tx.tx_bd_page_base.lo,
4553  data->tx.tx_bd_page_base.hi);
4554 }
4555 
4567 static inline int bnx2x_q_init(struct bnx2x *bp,
4568  struct bnx2x_queue_state_params *params)
4569 {
4570  struct bnx2x_queue_sp_obj *o = params->q_obj;
4571  struct bnx2x_queue_init_params *init = &params->params.init;
4572  u16 hc_usec;
4573  u8 cos;
4574 
4575  /* Tx HC configuration */
4576  if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4577  test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4578  hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4579 
4580  bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4581  init->tx.sb_cq_index,
4582  !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4583  hc_usec);
4584  }
4585 
4586  /* Rx HC configuration */
4587  if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4588  test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4589  hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4590 
4591  bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4592  init->rx.sb_cq_index,
4593  !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4594  hc_usec);
4595  }
4596 
4597  /* Set CDU context validation values */
4598  for (cos = 0; cos < o->max_cos; cos++) {
4599  DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4600  o->cids[cos], cos);
4601  DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4602  bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4603  }
4604 
4605  /* As no ramrod is sent, complete the command immediately */
4606  o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4607 
4608  mmiowb();
4609  smp_mb();
4610 
4611  return 0;
4612 }
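The hc_rate fields are interrupt rates (events per second); the code above inverts them into a microsecond period for the status-block index. For example, hc_rate = 50000 gives 1000000 / 50000 = 20 usec, and hc_rate = 0 disables coalescing. The same arithmetic, isolated as a sketch:

    static u16 example_hc_rate_to_usec(u16 hc_rate)
    {
            /* 1000000 usec per second / (interrupts per second) */
            return hc_rate ? 1000000 / hc_rate : 0;
    }
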
4613 
4614 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4615  struct bnx2x_queue_state_params *params)
4616 {
4617  struct bnx2x_queue_sp_obj *o = params->q_obj;
4618  struct client_init_ramrod_data *rdata =
4619  (struct client_init_ramrod_data *)o->rdata;
4620  dma_addr_t data_mapping = o->rdata_mapping;
4621  int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4622 
4623  /* Clear the ramrod data */
4624  memset(rdata, 0, sizeof(*rdata));
4625 
4626  /* Fill the ramrod data */
4627  bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4628 
4629  /*
4630  * No explicit memory barrier is needed here: the ordering of the
4631  * write to the SPQ element with respect to the update of the SPQ
4632  * producer (which involves a memory read) must be guaranteed
4633  * anyway, so a full memory barrier is placed inside
4634  * bnx2x_sp_post() itself.
4635  */
4636 
4637  return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4638  U64_HI(data_mapping),
4639  U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4640 }
4641 
4642 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4643  struct bnx2x_queue_state_params *params)
4644 {
4645  struct bnx2x_queue_sp_obj *o = params->q_obj;
4646  struct client_init_ramrod_data *rdata =
4647  (struct client_init_ramrod_data *)o->rdata;
4648  dma_addr_t data_mapping = o->rdata_mapping;
4649  int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4650 
4651  /* Clear the ramrod data */
4652  memset(rdata, 0, sizeof(*rdata));
4653 
4654  /* Fill the ramrod data */
4655  bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4656  bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4657 
4658  /*
4659  * No explicit memory barrier is needed here: the ordering of the
4660  * write to the SPQ element with respect to the update of the SPQ
4661  * producer (which involves a memory read) must be guaranteed
4662  * anyway, so a full memory barrier is placed inside
4663  * bnx2x_sp_post() itself.
4664  */
4665 
4666  return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4667  U64_HI(data_mapping),
4668  U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4669 }
4670 
4671 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4672  struct bnx2x_queue_state_params *params)
4673 {
4674  struct bnx2x_queue_sp_obj *o = params->q_obj;
4675  struct tx_queue_init_ramrod_data *rdata =
4676  (struct tx_queue_init_ramrod_data *)o->rdata;
4677  dma_addr_t data_mapping = o->rdata_mapping;
4678  int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4679  struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4680  &params->params.tx_only;
4681  u8 cid_index = tx_only_params->cid_index;
4682 
4683 
4684  if (cid_index >= o->max_cos) {
4685  BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4686  o->cl_id, cid_index);
4687  return -EINVAL;
4688  }
4689 
4690  DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4691  tx_only_params->gen_params.cos,
4692  tx_only_params->gen_params.spcl_id);
4693 
4694  /* Clear the ramrod data */
4695  memset(rdata, 0, sizeof(*rdata));
4696 
4697  /* Fill the ramrod data */
4698  bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4699 
4700  DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4701  o->cids[cid_index], rdata->general.client_id,
4702  rdata->general.sp_client_id, rdata->general.cos);
4703 
4704  /*
4705  * No explicit memory barrier is needed here: the ordering of the
4706  * write to the SPQ element with respect to the update of the SPQ
4707  * producer (which involves a memory read) must be guaranteed
4708  * anyway, so a full memory barrier is placed inside
4709  * bnx2x_sp_post() itself.
4710  */
4711 
4712  return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4713  U64_HI(data_mapping),
4714  U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4715 }
4716 
4717 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4718  struct bnx2x_queue_sp_obj *obj,
4719  struct bnx2x_queue_update_params *params,
4720  struct client_update_ramrod_data *data)
4721 {
4722  /* Client ID of the client to update */
4723  data->client_id = obj->cl_id;
4724 
4725  /* Function ID of the client to update */
4726  data->func_id = obj->func_id;
4727 
4728  /* Default VLAN value */
4729  data->default_vlan = cpu_to_le16(params->def_vlan);
4730 
4731  /* Inner VLAN stripping */
4732  data->inner_vlan_removal_enable_flg =
4733  test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4734  data->inner_vlan_removal_change_flg =
4735  test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4736  &params->update_flags);
4737 
4738  /* Outer VLAN stripping */
4739  data->outer_vlan_removal_enable_flg =
4740  test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4741  data->outer_vlan_removal_change_flg =
4742  test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4743  &params->update_flags);
4744 
4745  /* Drop packets that have source MAC that doesn't belong to this
4746  * Queue.
4747  */
4748  data->anti_spoofing_enable_flg =
4749  test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4750  data->anti_spoofing_change_flg =
4751  test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4752 
4753  /* Activate/Deactivate */
4754  data->activate_flg =
4755  test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4756  data->activate_change_flg =
4757  test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4758 
4759  /* Enable default VLAN */
4760  data->default_vlan_enable_flg =
4761  test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4762  data->default_vlan_change_flg =
4763  test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4764  &params->update_flags);
4765 
4766  /* silent vlan removal */
4767  data->silent_vlan_change_flg =
4768  test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4769  &params->update_flags);
4770  data->silent_vlan_removal_flg =
4771  test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4772  data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4773  data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4774 }
4775 
4776 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4777  struct bnx2x_queue_state_params *params)
4778 {
4779  struct bnx2x_queue_sp_obj *o = params->q_obj;
4780  struct client_update_ramrod_data *rdata =
4781  (struct client_update_ramrod_data *)o->rdata;
4782  dma_addr_t data_mapping = o->rdata_mapping;
4783  struct bnx2x_queue_update_params *update_params =
4784  &params->params.update;
4785  u8 cid_index = update_params->cid_index;
4786 
4787  if (cid_index >= o->max_cos) {
4788  BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4789  o->cl_id, cid_index);
4790  return -EINVAL;
4791  }
4792 
4793 
4794  /* Clear the ramrod data */
4795  memset(rdata, 0, sizeof(*rdata));
4796 
4797  /* Fill the ramrod data */
4798  bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4799 
4800  /*
4801  * No explicit memory barrier is needed here: the ordering of the
4802  * write to the SPQ element with respect to the update of the SPQ
4803  * producer (which involves a memory read) must be guaranteed
4804  * anyway, so a full memory barrier is placed inside
4805  * bnx2x_sp_post() itself.
4806  */
4807 
4808  return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4809  o->cids[cid_index], U64_HI(data_mapping),
4810  U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4811 }
4812 
4821 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4822  struct bnx2x_queue_state_params *params)
4823 {
4824  struct bnx2x_queue_update_params *update = &params->params.update;
4825 
4826  memset(update, 0, sizeof(*update));
4827 
4828  __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4829 
4830  return bnx2x_q_send_update(bp, params);
4831 }
4832 
4841 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4842  struct bnx2x_queue_state_params *params)
4843 {
4844  struct bnx2x_queue_update_params *update = &params->params.update;
4845 
4846  memset(update, 0, sizeof(*update));
4847 
4848  __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4849  __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4850 
4851  return bnx2x_q_send_update(bp, params);
4852 }
4853 
4854 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4855  struct bnx2x_queue_state_params *params)
4856 {
4857  /* TODO: Not implemented yet. */
4858  return -1;
4859 }
4860 
4861 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4862  struct bnx2x_queue_state_params *params)
4863 {
4864  struct bnx2x_queue_sp_obj *o = params->q_obj;
4865 
4866  return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4867  o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4868  ETH_CONNECTION_TYPE);
4869 }
4870 
4871 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4872  struct bnx2x_queue_state_params *params)
4873 {
4874  struct bnx2x_queue_sp_obj *o = params->q_obj;
4875  u8 cid_idx = params->params.cfc_del.cid_index;
4876 
4877  if (cid_idx >= o->max_cos) {
4878  BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4879  o->cl_id, cid_idx);
4880  return -EINVAL;
4881  }
4882 
4883  return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4884  o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4885 }
4886 
4887 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4888  struct bnx2x_queue_state_params *params)
4889 {
4890  struct bnx2x_queue_sp_obj *o = params->q_obj;
4891  u8 cid_index = params->params.terminate.cid_index;
4892 
4893  if (cid_index >= o->max_cos) {
4894  BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4895  o->cl_id, cid_index);
4896  return -EINVAL;
4897  }
4898 
4899  return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4900  o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4901 }
4902 
4903 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4904  struct bnx2x_queue_state_params *params)
4905 {
4906  struct bnx2x_queue_sp_obj *o = params->q_obj;
4907 
4908  return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4909  o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4910  ETH_CONNECTION_TYPE);
4911 }
4912 
4913 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4914  struct bnx2x_queue_state_params *params)
4915 {
4916  switch (params->cmd) {
4917  case BNX2X_Q_CMD_INIT:
4918  return bnx2x_q_init(bp, params);
4919  case BNX2X_Q_CMD_SETUP_TX_ONLY:
4920  return bnx2x_q_send_setup_tx_only(bp, params);
4921  case BNX2X_Q_CMD_DEACTIVATE:
4922  return bnx2x_q_send_deactivate(bp, params);
4923  case BNX2X_Q_CMD_ACTIVATE:
4924  return bnx2x_q_send_activate(bp, params);
4925  case BNX2X_Q_CMD_UPDATE:
4926  return bnx2x_q_send_update(bp, params);
4927  case BNX2X_Q_CMD_UPDATE_TPA:
4928  return bnx2x_q_send_update_tpa(bp, params);
4929  case BNX2X_Q_CMD_HALT:
4930  return bnx2x_q_send_halt(bp, params);
4931  case BNX2X_Q_CMD_CFC_DEL:
4932  return bnx2x_q_send_cfc_del(bp, params);
4933  case BNX2X_Q_CMD_TERMINATE:
4934  return bnx2x_q_send_terminate(bp, params);
4935  case BNX2X_Q_CMD_EMPTY:
4936  return bnx2x_q_send_empty(bp, params);
4937  default:
4938  BNX2X_ERR("Unknown command: %d\n", params->cmd);
4939  return -EINVAL;
4940  }
4941 }
4942 
4943 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4944  struct bnx2x_queue_state_params *params)
4945 {
4946  switch (params->cmd) {
4947  case BNX2X_Q_CMD_SETUP:
4948  return bnx2x_q_send_setup_e1x(bp, params);
4949  case BNX2X_Q_CMD_INIT:
4950  case BNX2X_Q_CMD_SETUP_TX_ONLY:
4951  case BNX2X_Q_CMD_DEACTIVATE:
4952  case BNX2X_Q_CMD_ACTIVATE:
4953  case BNX2X_Q_CMD_UPDATE:
4954  case BNX2X_Q_CMD_UPDATE_TPA:
4955  case BNX2X_Q_CMD_HALT:
4956  case BNX2X_Q_CMD_CFC_DEL:
4957  case BNX2X_Q_CMD_TERMINATE:
4958  case BNX2X_Q_CMD_EMPTY:
4959  return bnx2x_queue_send_cmd_cmn(bp, params);
4960  default:
4961  BNX2X_ERR("Unknown command: %d\n", params->cmd);
4962  return -EINVAL;
4963  }
4964 }
4965 
4966 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4967  struct bnx2x_queue_state_params *params)
4968 {
4969  switch (params->cmd) {
4970  case BNX2X_Q_CMD_SETUP:
4971  return bnx2x_q_send_setup_e2(bp, params);
4972  case BNX2X_Q_CMD_INIT:
4973  case BNX2X_Q_CMD_SETUP_TX_ONLY:
4974  case BNX2X_Q_CMD_DEACTIVATE:
4975  case BNX2X_Q_CMD_ACTIVATE:
4976  case BNX2X_Q_CMD_UPDATE:
4977  case BNX2X_Q_CMD_UPDATE_TPA:
4978  case BNX2X_Q_CMD_HALT:
4979  case BNX2X_Q_CMD_CFC_DEL:
4980  case BNX2X_Q_CMD_TERMINATE:
4981  case BNX2X_Q_CMD_EMPTY:
4982  return bnx2x_queue_send_cmd_cmn(bp, params);
4983  default:
4984  BNX2X_ERR("Unknown command: %d\n", params->cmd);
4985  return -EINVAL;
4986  }
4987 }
4988 
5005 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5006  struct bnx2x_queue_sp_obj *o,
5007  struct bnx2x_queue_state_params *params)
5008 {
5009  enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5010  enum bnx2x_queue_cmd cmd = params->cmd;
5011  struct bnx2x_queue_update_params *update_params =
5012  &params->params.update;
5013  u8 next_tx_only = o->num_tx_only;
5014 
5015  /*
5016  * Forget all pending for completion commands if a driver only state
5017  * transition has been requested.
5018  */
5019  if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5020  o->pending = 0;
5021  o->next_state = BNX2X_Q_STATE_MAX;
5022  }
5023 
5024  /*
5025  * Don't allow a next state transition if we are in the middle of
5026  * the previous one.
5027  */
5028  if (o->pending)
5029  return -EBUSY;
5030 
5031  switch (state) {
5032  case BNX2X_Q_STATE_RESET:
5033  if (cmd == BNX2X_Q_CMD_INIT)
5034  next_state = BNX2X_Q_STATE_INITIALIZED;
5035 
5036  break;
5037  case BNX2X_Q_STATE_INITIALIZED:
5038  if (cmd == BNX2X_Q_CMD_SETUP) {
5039  if (test_bit(BNX2X_Q_FLG_ACTIVE,
5040  &params->params.setup.flags))
5041  next_state = BNX2X_Q_STATE_ACTIVE;
5042  else
5043  next_state = BNX2X_Q_STATE_INACTIVE;
5044  }
5045 
5046  break;
5047  case BNX2X_Q_STATE_ACTIVE:
5048  if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5049  next_state = BNX2X_Q_STATE_INACTIVE;
5050 
5051  else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5052  (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5053  next_state = BNX2X_Q_STATE_ACTIVE;
5054 
5055  else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5056  next_state = BNX2X_Q_STATE_MULTI_COS;
5057  next_tx_only = 1;
5058  }
5059 
5060  else if (cmd == BNX2X_Q_CMD_HALT)
5061  next_state = BNX2X_Q_STATE_STOPPED;
5062 
5063  else if (cmd == BNX2X_Q_CMD_UPDATE) {
5064  /* If "active" state change is requested, update the
5065  * state accordingly.
5066  */
5067  if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5068  &update_params->update_flags) &&
5069  !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5070  &update_params->update_flags))
5071  next_state = BNX2X_Q_STATE_INACTIVE;
5072  else
5073  next_state = BNX2X_Q_STATE_ACTIVE;
5074  }
5075 
5076  break;
5077  case BNX2X_Q_STATE_MULTI_COS:
5078  if (cmd == BNX2X_Q_CMD_TERMINATE)
5079  next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5080 
5081  else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5082  next_state = BNX2X_Q_STATE_MULTI_COS;
5083  next_tx_only = o->num_tx_only + 1;
5084  }
5085 
5086  else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5087  (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5088  next_state = BNX2X_Q_STATE_MULTI_COS;
5089 
5090  else if (cmd == BNX2X_Q_CMD_UPDATE) {
5091  /* If "active" state change is requested, update the
5092  * state accordingly.
5093  */
5094  if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5095  &update_params->update_flags) &&
5096  !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5097  &update_params->update_flags))
5098  next_state = BNX2X_Q_STATE_INACTIVE;
5099  else
5100  next_state = BNX2X_Q_STATE_MULTI_COS;
5101  }
5102 
5103  break;
5104  case BNX2X_Q_STATE_MCOS_TERMINATED:
5105  if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5106  next_tx_only = o->num_tx_only - 1;
5107  if (next_tx_only == 0)
5108  next_state = BNX2X_Q_STATE_ACTIVE;
5109  else
5110  next_state = BNX2X_Q_STATE_MULTI_COS;
5111  }
5112 
5113  break;
5114  case BNX2X_Q_STATE_INACTIVE:
5115  if (cmd == BNX2X_Q_CMD_ACTIVATE)
5116  next_state = BNX2X_Q_STATE_ACTIVE;
5117 
5118  else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5119  (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5120  next_state = BNX2X_Q_STATE_INACTIVE;
5121 
5122  else if (cmd == BNX2X_Q_CMD_HALT)
5123  next_state = BNX2X_Q_STATE_STOPPED;
5124 
5125  else if (cmd == BNX2X_Q_CMD_UPDATE) {
5126  /* If "active" state change is requested, update the
5127  * state accordingly.
5128  */
5129  if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5130  &update_params->update_flags) &&
5131  test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5132  &update_params->update_flags)) {
5133  if (o->num_tx_only == 0)
5134  next_state = BNX2X_Q_STATE_ACTIVE;
5135  else /* tx only queues exist for this queue */
5136  next_state = BNX2X_Q_STATE_MULTI_COS;
5137  } else
5138  next_state = BNX2X_Q_STATE_INACTIVE;
5139  }
5140 
5141  break;
5142  case BNX2X_Q_STATE_STOPPED:
5143  if (cmd == BNX2X_Q_CMD_TERMINATE)
5144  next_state = BNX2X_Q_STATE_TERMINATED;
5145 
5146  break;
5147  case BNX2X_Q_STATE_TERMINATED:
5148  if (cmd == BNX2X_Q_CMD_CFC_DEL)
5149  next_state = BNX2X_Q_STATE_RESET;
5150 
5151  break;
5152  default:
5153  BNX2X_ERR("Illegal state: %d\n", state);
5154  }
5155 
5156  /* Transition is assured */
5157  if (next_state != BNX2X_Q_STATE_MAX) {
5158  DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5159  state, cmd, next_state);
5160  o->next_state = next_state;
5161  o->next_tx_only = next_tx_only;
5162  return 0;
5163  }
5164 
5165  DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5166 
5167  return -EINVAL;
5168 }
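One consequence of the transition table above: every tx-only connection created with SETUP_TX_ONLY must be unwound with a TERMINATE followed by a CFC_DEL on its cid_index, and the queue only drops back to ACTIVE once the last tx-only connection is gone. A hedged teardown sketch (hypothetical helper):

    static int example_teardown_tx_only(struct bnx2x *bp,
                                        struct bnx2x_queue_state_params *q_params,
                                        u8 tx_only_cid_index)
    {
            int rc;

            __set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

            /* MULTI_COS -> MCOS_TERMINATED */
            q_params->cmd = BNX2X_Q_CMD_TERMINATE;
            q_params->params.terminate.cid_index = tx_only_cid_index;
            rc = bnx2x_queue_state_change(bp, q_params);
            if (rc)
                    return rc;

            /* MCOS_TERMINATED -> ACTIVE or MULTI_COS, depending on the
             * remaining tx-only count
             */
            q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
            q_params->params.cfc_del.cid_index = tx_only_cid_index;
            return bnx2x_queue_state_change(bp, q_params);
    }
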
5169 
5170 void bnx2x_init_queue_obj(struct bnx2x *bp,
5171  struct bnx2x_queue_sp_obj *obj,
5172  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5173  void *rdata,
5174  dma_addr_t rdata_mapping, unsigned long type)
5175 {
5176  memset(obj, 0, sizeof(*obj));
5177 
5178  /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5179  BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5180 
5181  memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5182  obj->max_cos = cid_cnt;
5183  obj->cl_id = cl_id;
5184  obj->func_id = func_id;
5185  obj->rdata = rdata;
5186  obj->rdata_mapping = rdata_mapping;
5187  obj->type = type;
5188  obj->next_state = BNX2X_Q_STATE_MAX;
5189 
5190  if (CHIP_IS_E1x(bp))
5191  obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5192  else
5193  obj->send_cmd = bnx2x_queue_send_cmd_e2;
5194 
5195  obj->check_transition = bnx2x_queue_chk_transition;
5196 
5197  obj->complete_cmd = bnx2x_queue_comp_cmd;
5198  obj->wait_comp = bnx2x_queue_wait_comp;
5199  obj->set_pending = bnx2x_queue_set_pending;
5200 }
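A typical initialization passes the primary CID first, any tx-only CIDs after it, and a type mask describing which datapath halves the queue owns. Sketch (hypothetical helper and values):

    static void example_init_queue_obj(struct bnx2x *bp,
                                       struct bnx2x_queue_sp_obj *q_obj,
                                       u8 cl_id, u32 *cids, u8 cid_cnt,
                                       u8 func_id, void *rdata,
                                       dma_addr_t rdata_mapping)
    {
            unsigned long q_type = 0;

            /* a regular L2 queue has both an Rx and a Tx ring */
            __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
            __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

            bnx2x_init_queue_obj(bp, q_obj, cl_id, cids, cid_cnt, func_id,
                                 rdata, rdata_mapping, q_type);
    }
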
5201 
5202 /********************** Function state object *********************************/
5203 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5204  struct bnx2x_func_sp_obj *o)
5205 {
5206  /* in the middle of a transaction - return INVALID state */
5207  if (o->pending)
5208  return BNX2X_F_STATE_MAX;
5209 
5210  /*
5211  * Ensure the order of reading of o->pending and o->state:
5212  * o->pending should be read first.
5213  */
5214  rmb();
5215 
5216  return o->state;
5217 }
5218 
5219 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5220  struct bnx2x_func_sp_obj *o,
5221  enum bnx2x_func_cmd cmd)
5222 {
5223  return bnx2x_state_wait(bp, cmd, &o->pending);
5224 }
5225 
5236 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5237  struct bnx2x_func_sp_obj *o,
5238  enum bnx2x_func_cmd cmd)
5239 {
5240  unsigned long cur_pending = o->pending;
5241 
5242  if (!test_and_clear_bit(cmd, &cur_pending)) {
5243  BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5244  cmd, BP_FUNC(bp), o->state,
5245  cur_pending, o->next_state);
5246  return -EINVAL;
5247  }
5248 
5249  DP(BNX2X_MSG_SP,
5250  "Completing command %d for func %d, setting state to %d\n",
5251  cmd, BP_FUNC(bp), o->next_state);
5252 
5253  o->state = o->next_state;
5254  o->next_state = BNX2X_F_STATE_MAX;
5255 
5256  /* It's important that o->state and o->next_state are
5257  * updated before o->pending.
5258  */
5259  wmb();
5260 
5261  clear_bit(cmd, &o->pending);
5262  smp_mb__after_clear_bit();
5263 
5264  return 0;
5265 }
5266 
5276 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5277  struct bnx2x_func_sp_obj *o,
5278  enum bnx2x_func_cmd cmd)
5279 {
5280  /* Complete the state machine part first, check if it's a
5281  * legal completion.
5282  */
5283  int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5284  return rc;
5285 }
5286 
5302 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5303  struct bnx2x_func_sp_obj *o,
5304  struct bnx2x_func_state_params *params)
5305 {
5306  enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5307  enum bnx2x_func_cmd cmd = params->cmd;
5308 
5309  /*
5310  * Forget all pending for completion commands if a driver only state
5311  * transition has been requested.
5312  */
5313  if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5314  o->pending = 0;
5315  o->next_state = BNX2X_F_STATE_MAX;
5316  }
5317 
5318  /*
5319  * Don't allow a next state transition if we are in the middle of
5320  * the previous one.
5321  */
5322  if (o->pending)
5323  return -EBUSY;
5324 
5325  switch (state) {
5326  case BNX2X_F_STATE_RESET:
5327  if (cmd == BNX2X_F_CMD_HW_INIT)
5328  next_state = BNX2X_F_STATE_INITIALIZED;
5329 
5330  break;
5331  case BNX2X_F_STATE_INITIALIZED:
5332  if (cmd == BNX2X_F_CMD_START)
5333  next_state = BNX2X_F_STATE_STARTED;
5334 
5335  else if (cmd == BNX2X_F_CMD_HW_RESET)
5336  next_state = BNX2X_F_STATE_RESET;
5337 
5338  break;
5339  case BNX2X_F_STATE_STARTED:
5340  if (cmd == BNX2X_F_CMD_STOP)
5341  next_state = BNX2X_F_STATE_INITIALIZED;
5342  /* afex ramrods can be sent only in started mode, and only
5343  * if not pending for function_stop ramrod completion;
5344  * for these events the next state remains STARTED.
5345  */
5346  else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5347  (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5348  next_state = BNX2X_F_STATE_STARTED;
5349 
5350  else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5351  (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5352  next_state = BNX2X_F_STATE_STARTED;
5353  else if (cmd == BNX2X_F_CMD_TX_STOP)
5354  next_state = BNX2X_F_STATE_TX_STOPPED;
5355 
5356  break;
5357  case BNX2X_F_STATE_TX_STOPPED:
5358  if (cmd == BNX2X_F_CMD_TX_START)
5359  next_state = BNX2X_F_STATE_STARTED;
5360 
5361  break;
5362  default:
5363  BNX2X_ERR("Unknown state: %d\n", state);
5364  }
5365 
5366  /* Transition is assured */
5367  if (next_state != BNX2X_F_STATE_MAX) {
5368  DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5369  state, cmd, next_state);
5370  o->next_state = next_state;
5371  return 0;
5372  }
5373 
5374  DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5375  state, cmd);
5376 
5377  return -EINVAL;
5378 }
5379 
5390 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5391  const struct bnx2x_func_sp_drv_ops *drv)
5392 {
5393  return drv->init_hw_func(bp);
5394 }
5395 
5407 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5408  const struct bnx2x_func_sp_drv_ops *drv)
5409 {
5410  int rc = drv->init_hw_port(bp);
5411  if (rc)
5412  return rc;
5413 
5414  return bnx2x_func_init_func(bp, drv);
5415 }
5416 
5427 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5428  const struct bnx2x_func_sp_drv_ops *drv)
5429 {
5430  int rc = drv->init_hw_cmn_chip(bp);
5431  if (rc)
5432  return rc;
5433 
5434  return bnx2x_func_init_port(bp, drv);
5435 }
5436 
5447 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5448  const struct bnx2x_func_sp_drv_ops *drv)
5449 {
5450  int rc = drv->init_hw_cmn(bp);
5451  if (rc)
5452  return rc;
5453 
5454  return bnx2x_func_init_port(bp, drv);
5455 }
5456 
5457 static int bnx2x_func_hw_init(struct bnx2x *bp,
5458  struct bnx2x_func_state_params *params)
5459 {
5460  u32 load_code = params->params.hw_init.load_phase;
5461  struct bnx2x_func_sp_obj *o = params->f_obj;
5462  const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5463  int rc = 0;
5464 
5465  DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5466  BP_ABS_FUNC(bp), load_code);
5467 
5468  /* Prepare buffers for unzipping the FW */
5469  rc = drv->gunzip_init(bp);
5470  if (rc)
5471  return rc;
5472 
5473  /* Prepare FW */
5474  rc = drv->init_fw(bp);
5475  if (rc) {
5476  BNX2X_ERR("Error loading firmware\n");
5477  goto init_err;
5478  }
5479 
5480  /* Handle the beginning of the COMMON_XXX phases separately... */
5481  switch (load_code) {
5482  case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5483  rc = bnx2x_func_init_cmn_chip(bp, drv);
5484  if (rc)
5485  goto init_err;
5486 
5487  break;
5488  case FW_MSG_CODE_DRV_LOAD_COMMON:
5489  rc = bnx2x_func_init_cmn(bp, drv);
5490  if (rc)
5491  goto init_err;
5492 
5493  break;
5494  case FW_MSG_CODE_DRV_LOAD_PORT:
5495  rc = bnx2x_func_init_port(bp, drv);
5496  if (rc)
5497  goto init_err;
5498 
5499  break;
5500  case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5501  rc = bnx2x_func_init_func(bp, drv);
5502  if (rc)
5503  goto init_err;
5504 
5505  break;
5506  default:
5507  BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5508  rc = -EINVAL;
5509  }
5510 
5511 init_err:
5512  drv->gunzip_end(bp);
5513 
5514  /* In case of success, complete the command immediately: no ramrods
5515  * have been sent.
5516  */
5517  if (!rc)
5518  o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5519 
5520  return rc;
5521 }
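Driving this path from a caller means handing the MCP's load phase through the hw_init parameters. Sketch (hypothetical helper; bp->func_obj is the driver's function object):

    static int example_func_hw_init(struct bnx2x *bp, u32 load_code)
    {
            struct bnx2x_func_state_params func_params = {NULL};

            func_params.f_obj = &bp->func_obj;
            func_params.cmd = BNX2X_F_CMD_HW_INIT;
            func_params.params.hw_init.load_phase = load_code;

            return bnx2x_func_state_change(bp, &func_params);
    }
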
5522 
5532 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5533  const struct bnx2x_func_sp_drv_ops *drv)
5534 {
5535  drv->reset_hw_func(bp);
5536 }
5537 
5553 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5554  const struct bnx2x_func_sp_drv_ops *drv)
5555 {
5556  drv->reset_hw_port(bp);
5557  bnx2x_func_reset_func(bp, drv);
5558 }
5559 
5570 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5571  const struct bnx2x_func_sp_drv_ops *drv)
5572 {
5573  bnx2x_func_reset_port(bp, drv);
5574  drv->reset_hw_cmn(bp);
5575 }
5576 
5577 
5578 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5579  struct bnx2x_func_state_params *params)
5580 {
5581  u32 reset_phase = params->params.hw_reset.reset_phase;
5582  struct bnx2x_func_sp_obj *o = params->f_obj;
5583  const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5584 
5585  DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5586  reset_phase);
5587 
5588  switch (reset_phase) {
5589  case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5590  bnx2x_func_reset_cmn(bp, drv);
5591  break;
5592  case FW_MSG_CODE_DRV_UNLOAD_PORT:
5593  bnx2x_func_reset_port(bp, drv);
5594  break;
5595  case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5596  bnx2x_func_reset_func(bp, drv);
5597  break;
5598  default:
5599  BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5600  reset_phase);
5601  break;
5602  }
5603 
5604  /* Complete the command immediately: no ramrods have been sent. */
5605  o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5606 
5607  return 0;
5608 }
5609 
5610 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5611  struct bnx2x_func_state_params *params)
5612 {
5613  struct bnx2x_func_sp_obj *o = params->f_obj;
5614  struct function_start_data *rdata =
5615  (struct function_start_data *)o->rdata;
5616  dma_addr_t data_mapping = o->rdata_mapping;
5617  struct bnx2x_func_start_params *start_params = &params->params.start;
5618 
5619  memset(rdata, 0, sizeof(*rdata));
5620 
5621  /* Fill the ramrod data with provided parameters */
5622  rdata->function_mode = (u8)start_params->mf_mode;
5623  rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5624  rdata->path_id = BP_PATH(bp);
5625  rdata->network_cos_mode = start_params->network_cos_mode;
5626 
5627  /*
5628  * No explicit memory barrier is needed here: the ordering of the
5629  * write to the SPQ element with respect to the update of the SPQ
5630  * producer (which involves a memory read) must be guaranteed
5631  * anyway, so a full memory barrier is placed inside
5632  * bnx2x_sp_post() itself.
5633  */
5634 
5635  return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5636  U64_HI(data_mapping),
5637  U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5638 }
5639 
5640 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5641  struct bnx2x_func_state_params *params)
5642 {
5643  struct bnx2x_func_sp_obj *o = params->f_obj;
5644  struct function_update_data *rdata =
5645  (struct function_update_data *)o->afex_rdata;
5646  dma_addr_t data_mapping = o->afex_rdata_mapping;
5647  struct bnx2x_func_afex_update_params *afex_update_params =
5648  &params->params.afex_update;
5649 
5650  memset(rdata, 0, sizeof(*rdata));
5651 
5652  /* Fill the ramrod data with provided parameters */
5653  rdata->vif_id_change_flg = 1;
5654  rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5655  rdata->afex_default_vlan_change_flg = 1;
5656  rdata->afex_default_vlan =
5657  cpu_to_le16(afex_update_params->afex_default_vlan);
5658  rdata->allowed_priorities_change_flg = 1;
5659  rdata->allowed_priorities = afex_update_params->allowed_priorities;
5660 
5661  /* No explicit memory barrier is needed here: the ordering of the
5662  * write to the SPQ element with respect to the update of the SPQ
5663  * producer (which involves a memory read) must be guaranteed
5664  * anyway, so a full memory barrier is placed inside
5665  * bnx2x_sp_post() itself.
5666  */
5667  DP(BNX2X_MSG_SP,
5668  "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5669  rdata->vif_id,
5670  rdata->afex_default_vlan, rdata->allowed_priorities);
5671 
5672  return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5673  U64_HI(data_mapping),
5674  U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5675 }
5676 
5677 static
5678 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5679  struct bnx2x_func_state_params *params)
5680 {
5681  struct bnx2x_func_sp_obj *o = params->f_obj;
5682  struct afex_vif_list_ramrod_data *rdata =
5683  (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5684  struct bnx2x_func_afex_viflists_params *afex_viflist_params =
5685  &params->params.afex_viflists;
5686  u64 *p_rdata = (u64 *)rdata;
5687 
5688  memset(rdata, 0, sizeof(*rdata));
5689 
5690  /* Fill the ramrod data with provided parameters */
5691  rdata->vif_list_index = afex_viflist_params->vif_list_index;
5692  rdata->func_bit_map = afex_viflist_params->func_bit_map;
5693  rdata->afex_vif_list_command =
5694  afex_viflist_params->afex_vif_list_command;
5695  rdata->func_to_clear = afex_viflist_params->func_to_clear;
5696 
5697  /* send the sub-command in the echo field */
5698  rdata->echo = afex_viflist_params->afex_vif_list_command;
5699 
5700  /* No explicit memory barrier is needed here: the ordering of the
5701  * write to the SPQ element with respect to the update of the SPQ
5702  * producer (which involves a memory read) must be guaranteed
5703  * anyway, so a full memory barrier is placed inside
5704  * bnx2x_sp_post() itself.
5705  */
5706 
5707  DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5708  rdata->afex_vif_list_command, rdata->vif_list_index,
5709  rdata->func_bit_map, rdata->func_to_clear);
5710 
5711  /* this ramrod sends data directly and not through DMA mapping */
5712  return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5713  U64_HI(*p_rdata), U64_LO(*p_rdata),
5714  NONE_CONNECTION_TYPE);
5715 }
5716 
5717 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5718  struct bnx2x_func_state_params *params)
5719 {
5720  return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5721  NONE_CONNECTION_TYPE);
5722 }
5723 
5724 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5725  struct bnx2x_func_state_params *params)
5726 {
5727  return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5728  NONE_CONNECTION_TYPE);
5729 }
5730 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5731  struct bnx2x_func_state_params *params)
5732 {
5733  struct bnx2x_func_sp_obj *o = params->f_obj;
5734  struct flow_control_configuration *rdata =
5735  (struct flow_control_configuration *)o->rdata;
5736  dma_addr_t data_mapping = o->rdata_mapping;
5737  struct bnx2x_func_tx_start_params *tx_start_params =
5738  &params->params.tx_start;
5739  int i;
5740 
5741  memset(rdata, 0, sizeof(*rdata));
5742 
5743  rdata->dcb_enabled = tx_start_params->dcb_enabled;
5744  rdata->dcb_version = tx_start_params->dcb_version;
5745  rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5746 
5747  for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5748  rdata->traffic_type_to_priority_cos[i] =
5749  tx_start_params->traffic_type_to_priority_cos[i];
5750 
5751  return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5752  U64_HI(data_mapping),
5753  U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5754 }
5755 
5756 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5757  struct bnx2x_func_state_params *params)
5758 {
5759  switch (params->cmd) {
5760  case BNX2X_F_CMD_HW_INIT:
5761  return bnx2x_func_hw_init(bp, params);
5762  case BNX2X_F_CMD_START:
5763  return bnx2x_func_send_start(bp, params);
5764  case BNX2X_F_CMD_STOP:
5765  return bnx2x_func_send_stop(bp, params);
5766  case BNX2X_F_CMD_HW_RESET:
5767  return bnx2x_func_hw_reset(bp, params);
5768  case BNX2X_F_CMD_AFEX_UPDATE:
5769  return bnx2x_func_send_afex_update(bp, params);
5770  case BNX2X_F_CMD_AFEX_VIFLISTS:
5771  return bnx2x_func_send_afex_viflists(bp, params);
5772  case BNX2X_F_CMD_TX_STOP:
5773  return bnx2x_func_send_tx_stop(bp, params);
5774  case BNX2X_F_CMD_TX_START:
5775  return bnx2x_func_send_tx_start(bp, params);
5776  default:
5777  BNX2X_ERR("Unknown command: %d\n", params->cmd);
5778  return -EINVAL;
5779  }
5780 }
5781 
5782 void bnx2x_init_func_obj(struct bnx2x *bp,
5783  struct bnx2x_func_sp_obj *obj,
5784  void *rdata, dma_addr_t rdata_mapping,
5785  void *afex_rdata, dma_addr_t afex_rdata_mapping,
5786  struct bnx2x_func_sp_drv_ops *drv_iface)
5787 {
5788  memset(obj, 0, sizeof(*obj));
5789 
5789 
5790  mutex_init(&obj->one_pending_mutex);
5791 
5792  obj->rdata = rdata;
5793  obj->rdata_mapping = rdata_mapping;
5794  obj->afex_rdata = afex_rdata;
5795  obj->afex_rdata_mapping = afex_rdata_mapping;
5796  obj->send_cmd = bnx2x_func_send_cmd;
5797  obj->check_transition = bnx2x_func_chk_transition;
5798  obj->complete_cmd = bnx2x_func_comp_cmd;
5799  obj->wait_comp = bnx2x_func_wait_comp;
5800 
5801  obj->drv = drv_iface;
5802 }
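Once the object is initialized, function-level transitions go through bnx2x_func_state_change() below. For instance, a START request carrying the multi-function mode might look like this (hypothetical sketch patterned on the driver's own callers):

    static int example_func_start(struct bnx2x *bp)
    {
            struct bnx2x_func_state_params func_params = {NULL};

            func_params.f_obj = &bp->func_obj;
            func_params.cmd = BNX2X_F_CMD_START;
            func_params.params.start.mf_mode = bp->mf_mode;
            func_params.params.start.sd_vlan_tag = bp->mf_ov;

            return bnx2x_func_state_change(bp, &func_params);
    }
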
5803 
5817 int bnx2x_func_state_change(struct bnx2x *bp,
5818  struct bnx2x_func_state_params *params)
5819 {
5820  struct bnx2x_func_sp_obj *o = params->f_obj;
5821  int rc;
5822  enum bnx2x_func_cmd cmd = params->cmd;
5823  unsigned long *pending = &o->pending;
5824 
5825  mutex_lock(&o->one_pending_mutex);
5826 
5827  /* Check that the requested transition is legal */
5828  if (o->check_transition(bp, o, params)) {
5829  mutex_unlock(&o->one_pending_mutex);
5830  return -EINVAL;
5831  }
5832 
5833  /* Set "pending" bit */
5834  set_bit(cmd, pending);
5835 
5836  /* Don't send a command if only driver cleanup was requested */
5837  if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5838  bnx2x_func_state_change_comp(bp, o, cmd);
5839  mutex_unlock(&o->one_pending_mutex);
5840  } else {
5841  /* Send a ramrod */
5842  rc = o->send_cmd(bp, params);
5843 
5844  mutex_unlock(&o->one_pending_mutex);
5845 
5846  if (rc) {
5847  o->next_state = BNX2X_F_STATE_MAX;
5848  clear_bit(cmd, pending);
5849  smp_mb__after_clear_bit();
5850  return rc;
5851  }
5852 
5853  if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5854  rc = o->wait_comp(bp, o, cmd);
5855  if (rc)
5856  return rc;
5857 
5858  return 0;
5859  }
5860  }
5861