Linux Kernel 3.7.1
be_cmds.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation. The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include <linux/module.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 
22 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
23 {
24  return wrb->payload.embedded_payload;
25 }
26 
27 static void be_mcc_notify(struct be_adapter *adapter)
28 {
29  struct be_queue_info *mccq = &adapter->mcc_obj.q;
30  u32 val = 0;
31 
32  if (be_error(adapter))
33  return;
34 
35  val |= mccq->id & DB_MCCQ_RING_ID_MASK;
36  val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
37 
38  wmb();
39  iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
40 }
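/* Doorbell format, per the two ORs above: the MCCQ ring id sits in the
 * low bits (DB_MCCQ_RING_ID_MASK) and the count of newly posted WRBs at
 * DB_MCCQ_NUM_POSTED_SHIFT; e.g. posting one WRB to ring 5 writes
 * (5 | (1 << DB_MCCQ_NUM_POSTED_SHIFT)). The wmb() makes the WRB
 * contents visible to the device before the doorbell rings.
 */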
41 
42 /* To check if valid bit is set, check the entire word as we don't know
43  * the endianness of the data (old entry is host endian while a new entry is
44  * little endian) */
45 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
46 {
47  if (compl->flags != 0) {
48  compl->flags = le32_to_cpu(compl->flags);
49  BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
50  return true;
51  } else {
52  return false;
53  }
54 }
55 
56 /* Need to reset the entire word that houses the valid bit */
57 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
58 {
59  compl->flags = 0;
60 }
61 
62 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
63 {
64  unsigned long addr;
65 
66  addr = tag1;
67  addr = ((addr << 16) << 16) | tag0;
68  return (void *)addr;
69 }
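/* tag1:tag0 hold the 64-bit virtual address of the request header saved
 * by be_wrb_cmd_hdr_prepare(). The double shift ((addr << 16) << 16)
 * rather than (addr << 32) avoids undefined behaviour on 32-bit builds,
 * where unsigned long is 32 bits and a shift by the full type width is
 * not allowed; there tag1 simply ends up discarded.
 */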
70 
71 static int be_mcc_compl_process(struct be_adapter *adapter,
72  struct be_mcc_compl *compl)
73 {
74  u16 compl_status, extd_status;
75  struct be_cmd_resp_hdr *resp_hdr;
76  u8 opcode = 0, subsystem = 0;
77 
78  /* Just swap the status to host endian; mcc tag is opaquely copied
79  * from mcc_wrb */
80  be_dws_le_to_cpu(compl, 4);
81 
82  compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
83  CQE_STATUS_COMPL_MASK;
84 
85  resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
86 
87  if (resp_hdr) {
88  opcode = resp_hdr->opcode;
89  subsystem = resp_hdr->subsystem;
90  }
91 
92  if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
93  (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
94  (subsystem == CMD_SUBSYSTEM_COMMON)) {
95  adapter->flash_status = compl_status;
96  complete(&adapter->flash_compl);
97  }
98 
99  if (compl_status == MCC_STATUS_SUCCESS) {
100  if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
101  (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
102  (subsystem == CMD_SUBSYSTEM_ETH)) {
103  be_parse_stats(adapter);
104  adapter->stats_cmd_sent = false;
105  }
106  if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
107  subsystem == CMD_SUBSYSTEM_COMMON) {
108  struct be_cmd_resp_get_cntl_addnl_attribs *resp =
109  (void *)resp_hdr;
110  adapter->drv_stats.be_on_die_temperature =
111  resp->on_die_temperature;
112  }
113  } else {
114  if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
115  adapter->be_get_temp_freq = 0;
116 
117  if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
118  compl_status == MCC_STATUS_ILLEGAL_REQUEST)
119  goto done;
120 
121  if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
122  dev_warn(&adapter->pdev->dev,
123  "VF is not privileged to issue opcode %d-%d\n",
124  opcode, subsystem);
125  } else {
126  extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
127  CQE_STATUS_EXTD_MASK;
128  dev_err(&adapter->pdev->dev,
129  "opcode %d-%d failed:status %d-%d\n",
130  opcode, subsystem, compl_status, extd_status);
131  }
132  }
133 done:
134  return compl_status;
135 }
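/* Layout assumed by the shifts above (see the CQE_STATUS_* constants in
 * be_cmds.h): the base completion status occupies the low 16 bits of
 * compl->status and the extended status the high 16 bits.
 */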
136 
137 /* Link state evt is a string of bytes; no need for endian swapping */
138 static void be_async_link_state_process(struct be_adapter *adapter,
139  struct be_async_event_link_state *evt)
140 {
141  /* When link status changes, link speed must be re-queried from FW */
142  adapter->phy.link_speed = -1;
143 
144  /* Ignore physical link event */
145  if (lancer_chip(adapter) &&
146  !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
147  return;
148 
149  /* For the initial link status do not rely on the ASYNC event as
150  * it may not be received in some cases.
151  */
152  if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
153  be_link_status_update(adapter, evt->port_link_status);
154 }
155 
156 /* Grp5 CoS Priority evt */
157 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
158  struct be_async_event_grp5_cos_priority *evt)
159 {
160  if (evt->valid) {
161  adapter->vlan_prio_bmap = evt->available_priority_bmap;
162  adapter->recommended_prio &= ~VLAN_PRIO_MASK;
163  adapter->recommended_prio =
164  evt->reco_default_priority << VLAN_PRIO_SHIFT;
165  }
166 }
167 
168 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
169 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
170  struct be_async_event_grp5_qos_link_speed *evt)
171 {
172  if (adapter->phy.link_speed >= 0 &&
173  evt->physical_port == adapter->port_num)
174  adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
175 }
176 
177 /*Grp5 PVID evt*/
178 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
179  struct be_async_event_grp5_pvid_state *evt)
180 {
181  if (evt->enabled)
182  adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
183  else
184  adapter->pvid = 0;
185 }
186 
187 static void be_async_grp5_evt_process(struct be_adapter *adapter,
188  u32 trailer, struct be_mcc_compl *evt)
189 {
190  u8 event_type = 0;
191 
192  event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
193  ASYNC_TRAILER_EVENT_TYPE_MASK;
194 
195  switch (event_type) {
196  case ASYNC_EVENT_COS_PRIORITY:
197  be_async_grp5_cos_priority_process(adapter,
198  (struct be_async_event_grp5_cos_priority *)evt);
199  break;
200  case ASYNC_EVENT_QOS_SPEED:
201  be_async_grp5_qos_speed_process(adapter,
202  (struct be_async_event_grp5_qos_link_speed *)evt);
203  break;
204  case ASYNC_EVENT_PVID_STATE:
205  be_async_grp5_pvid_state_process(adapter,
206  (struct be_async_event_grp5_pvid_state *)evt);
207  break;
208  default:
209  dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
210  break;
211  }
212 }
213 
214 static inline bool is_link_state_evt(u32 trailer)
215 {
216  return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
217  ASYNC_TRAILER_EVENT_CODE_MASK) ==
218  ASYNC_EVENT_CODE_LINK_STATE;
219 }
220 
221 static inline bool is_grp5_evt(u32 trailer)
222 {
223  return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
224  ASYNC_TRAILER_EVENT_CODE_MASK) ==
225  ASYNC_EVENT_CODE_GRP_5);
226 }
227 
228 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
229 {
230  struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
231  struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
232 
233  if (be_mcc_compl_is_new(compl)) {
234  queue_tail_inc(mcc_cq);
235  return compl;
236  }
237  return NULL;
238 }
239 
240 void be_async_mcc_enable(struct be_adapter *adapter)
241 {
242  spin_lock_bh(&adapter->mcc_cq_lock);
243 
244  be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
245  adapter->mcc_obj.rearm_cq = true;
246 
247  spin_unlock_bh(&adapter->mcc_cq_lock);
248 }
249 
250 void be_async_mcc_disable(struct be_adapter *adapter)
251 {
252  adapter->mcc_obj.rearm_cq = false;
253 }
254 
255 int be_process_mcc(struct be_adapter *adapter)
256 {
257  struct be_mcc_compl *compl;
258  int num = 0, status = 0;
259  struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
260 
261  spin_lock(&adapter->mcc_cq_lock);
262  while ((compl = be_mcc_compl_get(adapter))) {
263  if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
264  /* Interpret flags as an async trailer */
265  if (is_link_state_evt(compl->flags))
266  be_async_link_state_process(adapter,
267  (struct be_async_event_link_state *) compl);
268  else if (is_grp5_evt(compl->flags))
269  be_async_grp5_evt_process(adapter,
270  compl->flags, compl);
271  } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
272  status = be_mcc_compl_process(adapter, compl);
273  atomic_dec(&mcc_obj->q.used);
274  }
275  be_mcc_compl_use(compl);
276  num++;
277  }
278 
279  if (num)
280  be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
281 
282  spin_unlock(&adapter->mcc_cq_lock);
283  return status;
284 }
285 
286 /* Wait till no more pending mcc requests are present */
287 static int be_mcc_wait_compl(struct be_adapter *adapter)
288 {
289 #define mcc_timeout 120000 /* 12s timeout */
290  int i, status = 0;
291  struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
292 
293  for (i = 0; i < mcc_timeout; i++) {
294  if (be_error(adapter))
295  return -EIO;
296 
297  local_bh_disable();
298  status = be_process_mcc(adapter);
299  local_bh_enable();
300 
301  if (atomic_read(&mcc_obj->q.used) == 0)
302  break;
303  udelay(100);
304  }
305  if (i == mcc_timeout) {
306  dev_err(&adapter->pdev->dev, "FW not responding\n");
307  adapter->fw_timeout = true;
308  return -EIO;
309  }
310  return status;
311 }
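/* Worst case this polls mcc_timeout times with udelay(100) between
 * polls: 120000 * 100us = 12s, matching the "12s timeout" note above.
 */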
312 
313 /* Notify MCC requests and wait for completion */
314 static int be_mcc_notify_wait(struct be_adapter *adapter)
315 {
316  int status;
317  struct be_mcc_wrb *wrb;
318  struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
319  u16 index = mcc_obj->q.head;
320  struct be_cmd_resp_hdr *resp;
321 
322  index_dec(&index, mcc_obj->q.len);
323  wrb = queue_index_node(&mcc_obj->q, index);
324 
325  resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
326 
327  be_mcc_notify(adapter);
328 
329  status = be_mcc_wait_compl(adapter);
330  if (status == -EIO)
331  goto out;
332 
333  status = resp->status;
334 out:
335  return status;
336 }
337 
338 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
339 {
340  int msecs = 0;
341  u32 ready;
342 
343  do {
344  if (be_error(adapter))
345  return -EIO;
346 
347  ready = ioread32(db);
348  if (ready == 0xffffffff)
349  return -1;
350 
351  ready &= MPU_MAILBOX_DB_RDY_MASK;
352  if (ready)
353  break;
354 
355  if (msecs > 4000) {
356  dev_err(&adapter->pdev->dev, "FW not responding\n");
357  adapter->fw_timeout = true;
358  be_detect_error(adapter);
359  return -1;
360  }
361 
362  msleep(1);
363  msecs++;
364  } while (true);
365 
366  return 0;
367 }
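/* An all-ones readback is what a PCI MMIO read typically returns once
 * the device has fallen off the bus, so 0xffffffff is treated as a hard
 * failure immediately instead of being polled until the 4s limit.
 */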
368 
369 /*
370  * Insert the mailbox address into the doorbell in two steps
371  * Polls on the mbox doorbell till a command completion (or a timeout) occurs
372  */
373 static int be_mbox_notify_wait(struct be_adapter *adapter)
374 {
375  int status;
376  u32 val = 0;
377  void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
378  struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
379  struct be_mcc_mailbox *mbox = mbox_mem->va;
380  struct be_mcc_compl *compl = &mbox->compl;
381 
382  /* wait for ready to be set */
383  status = be_mbox_db_ready_wait(adapter, db);
384  if (status != 0)
385  return status;
386 
387  val |= MPU_MAILBOX_DB_HI_MASK;
388  /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
389  val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
390  iowrite32(val, db);
391 
392  /* wait for ready to be set */
393  status = be_mbox_db_ready_wait(adapter, db);
394  if (status != 0)
395  return status;
396 
397  val = 0;
398  /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
399  val |= (u32)(mbox_mem->dma >> 4) << 2;
400  iowrite32(val, db);
401 
402  status = be_mbox_db_ready_wait(adapter, db);
403  if (status != 0)
404  return status;
405 
406  /* A cq entry has been made now */
407  if (be_mcc_compl_is_new(compl)) {
408  status = be_mcc_compl_process(adapter, &mbox->compl);
409  be_mcc_compl_use(compl);
410  if (status)
411  return status;
412  } else {
413  dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
414  return -1;
415  }
416  return 0;
417 }
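/* The two doorbell writes together convey a 64-bit, 16-byte-aligned
 * mailbox address through one 32-bit register: the first write (HI bit
 * set) carries address bits 34-63 in doorbell bits 2-31, the second
 * carries bits 4-33 in the same field; bits 0-3 are zero by alignment.
 */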
418 
419 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
420 {
421  u32 sem;
422 
423  if (lancer_chip(adapter))
424  sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
425  else
426  sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
427 
428  *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
429  if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
430  return -1;
431  else
432  return 0;
433 }
434 
435 int lancer_wait_ready(struct be_adapter *adapter)
436 {
437 #define SLIPORT_READY_TIMEOUT 30
438  u32 sliport_status;
439  int status = 0, i;
440 
441  for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
442  sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
443  if (sliport_status & SLIPORT_STATUS_RDY_MASK)
444  break;
445 
446  msleep(1000);
447  }
448 
449  if (i == SLIPORT_READY_TIMEOUT)
450  status = -1;
451 
452  return status;
453 }
454 
455 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
456 {
457  int status;
458  u32 sliport_status, err, reset_needed;
459  status = lancer_wait_ready(adapter);
460  if (!status) {
461  sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
462  err = sliport_status & SLIPORT_STATUS_ERR_MASK;
463  reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
464  if (err && reset_needed) {
465  iowrite32(SLI_PORT_CONTROL_IP_MASK,
466  adapter->db + SLIPORT_CONTROL_OFFSET);
467 
468  /* check adapter has corrected the error */
469  status = lancer_wait_ready(adapter);
470  sliport_status = ioread32(adapter->db +
471  SLIPORT_STATUS_OFFSET);
472  sliport_status &= (SLIPORT_STATUS_ERR_MASK |
473  SLIPORT_STATUS_RN_MASK);
474  if (status || sliport_status)
475  status = -1;
476  } else if (err || reset_needed) {
477  status = -1;
478  }
479  }
480  return status;
481 }
482 
483 int be_fw_wait_ready(struct be_adapter *adapter)
484 {
485  u16 stage;
486  int status, timeout = 0;
487  struct device *dev = &adapter->pdev->dev;
488 
489  if (lancer_chip(adapter)) {
490  status = lancer_wait_ready(adapter);
491  return status;
492  }
493 
494  do {
495  status = be_POST_stage_get(adapter, &stage);
496  if (status) {
497  dev_err(dev, "POST error; stage=0x%x\n", stage);
498  return -1;
499  } else if (stage != POST_STAGE_ARMFW_RDY) {
500  if (msleep_interruptible(2000)) {
501  dev_err(dev, "Waiting for POST aborted\n");
502  return -EINTR;
503  }
504  timeout += 2;
505  } else {
506  return 0;
507  }
508  } while (timeout < 60);
509 
510  dev_err(dev, "POST timeout; stage=0x%x\n", stage);
511  return -1;
512 }
513 
514 
515 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
516 {
517  return &wrb->payload.sgl[0];
518 }
519 
520 
521 /* Don't touch the hdr after it's prepared */
522 /* mem will be NULL for embedded commands */
523 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
524  u8 subsystem, u8 opcode, int cmd_len,
525  struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
526 {
527  struct be_sge *sge;
528  unsigned long addr = (unsigned long)req_hdr;
529  u64 req_addr = addr;
530 
531  req_hdr->opcode = opcode;
532  req_hdr->subsystem = subsystem;
533  req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
534  req_hdr->version = 0;
535 
536  wrb->tag0 = req_addr & 0xFFFFFFFF;
537  wrb->tag1 = upper_32_bits(req_addr);
538 
539  wrb->payload_length = cmd_len;
540  if (mem) {
541  wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
542  MCC_WRB_SGE_CNT_SHIFT;
543  sge = nonembedded_sgl(wrb);
544  sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
545  sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
546  sge->len = cpu_to_le32(mem->size);
547  } else
548  wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
549  be_dws_cpu_to_le(wrb, 8);
550 }
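/* Round trip of the tags set above: be_decode_resp_hdr() reassembles
 * tag0/tag1 into the request address when the completion arrives, which
 * is how be_mcc_compl_process() recovers the opcode and subsystem of
 * the command that completed.
 */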
551 
552 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
553  struct be_dma_mem *mem)
554 {
555  int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
556  u64 dma = (u64)mem->dma;
557 
558  for (i = 0; i < buf_pages; i++) {
559  pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
560  pages[i].hi = cpu_to_le32(upper_32_bits(dma));
561  dma += PAGE_SIZE_4K;
562  }
563 }
564 
565 /* Converts interrupt delay in microseconds to multiplier value */
566 static u32 eq_delay_to_mult(u32 usec_delay)
567 {
568 #define MAX_INTR_RATE 651042
569  const u32 round = 10;
570  u32 multiplier;
571 
572  if (usec_delay == 0)
573  multiplier = 0;
574  else {
575  u32 interrupt_rate = 1000000 / usec_delay;
576  /* Max delay, corresponding to the lowest interrupt rate */
577  if (interrupt_rate == 0)
578  multiplier = 1023;
579  else {
580  multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
581  multiplier /= interrupt_rate;
582  /* Round the multiplier to the closest value.*/
583  multiplier = (multiplier + round/2) / round;
584  multiplier = min(multiplier, (u32)1023);
585  }
586  }
587  return multiplier;
588 }
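/* Worked example: usec_delay = 96 gives interrupt_rate = 1000000/96 =
 * 10416/s; (651042 - 10416) * 10 / 10416 = 615, and (615 + 5) / 10
 * rounds that to a final multiplier of 62, well under the 1023 cap.
 */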
589 
590 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
591 {
592  struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
593  struct be_mcc_wrb *wrb
594  = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
595  memset(wrb, 0, sizeof(*wrb));
596  return wrb;
597 }
598 
599 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
600 {
601  struct be_queue_info *mccq = &adapter->mcc_obj.q;
602  struct be_mcc_wrb *wrb;
603 
604  if (atomic_read(&mccq->used) >= mccq->len) {
605  dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
606  return NULL;
607  }
608 
609  wrb = queue_head_node(mccq);
610  queue_head_inc(mccq);
611  atomic_inc(&mccq->used);
612  memset(wrb, 0, sizeof(*wrb));
613  return wrb;
614 }
615 
616 /* Tell fw we're about to start firing cmds by writing a
617  * special pattern across the wrb hdr; uses mbox
618  */
619 int be_cmd_fw_init(struct be_adapter *adapter)
620 {
621  u8 *wrb;
622  int status;
623 
624  if (lancer_chip(adapter))
625  return 0;
626 
627  if (mutex_lock_interruptible(&adapter->mbox_lock))
628  return -1;
629 
630  wrb = (u8 *)wrb_from_mbox(adapter);
631  *wrb++ = 0xFF;
632  *wrb++ = 0x12;
633  *wrb++ = 0x34;
634  *wrb++ = 0xFF;
635  *wrb++ = 0xFF;
636  *wrb++ = 0x56;
637  *wrb++ = 0x78;
638  *wrb = 0xFF;
639 
640  status = be_mbox_notify_wait(adapter);
641 
642  mutex_unlock(&adapter->mbox_lock);
643  return status;
644 }
645 
646 /* Tell fw we're done with firing cmds by writing a
647  * special pattern across the wrb hdr; uses mbox
648  */
649 int be_cmd_fw_clean(struct be_adapter *adapter)
650 {
651  u8 *wrb;
652  int status;
653 
654  if (lancer_chip(adapter))
655  return 0;
656 
657  if (mutex_lock_interruptible(&adapter->mbox_lock))
658  return -1;
659 
660  wrb = (u8 *)wrb_from_mbox(adapter);
661  *wrb++ = 0xFF;
662  *wrb++ = 0xAA;
663  *wrb++ = 0xBB;
664  *wrb++ = 0xFF;
665  *wrb++ = 0xFF;
666  *wrb++ = 0xCC;
667  *wrb++ = 0xDD;
668  *wrb = 0xFF;
669 
670  status = be_mbox_notify_wait(adapter);
671 
672  mutex_unlock(&adapter->mbox_lock);
673  return status;
674 }
675 
676 int be_cmd_eq_create(struct be_adapter *adapter,
677  struct be_queue_info *eq, int eq_delay)
678 {
679  struct be_mcc_wrb *wrb;
680  struct be_cmd_req_eq_create *req;
681  struct be_dma_mem *q_mem = &eq->dma_mem;
682  int status;
683 
684  if (mutex_lock_interruptible(&adapter->mbox_lock))
685  return -1;
686 
687  wrb = wrb_from_mbox(adapter);
688  req = embedded_payload(wrb);
689 
690  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
691  OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
692 
693  req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
694 
695  AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
696  /* 4byte eqe*/
697  AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
698  AMAP_SET_BITS(struct amap_eq_context, count, req->context,
699  __ilog2_u32(eq->len/256));
700  AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
701  eq_delay_to_mult(eq_delay));
702  be_dws_cpu_to_le(req->context, sizeof(req->context));
703 
704  be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
705 
706  status = be_mbox_notify_wait(adapter);
707  if (!status) {
708  struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
709  eq->id = le16_to_cpu(resp->eq_id);
710  eq->created = true;
711  }
712 
713  mutex_unlock(&adapter->mbox_lock);
714  return status;
715 }
716 
717 /* Use MCC */
718 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
719  bool permanent, u32 if_handle, u32 pmac_id)
720 {
721  struct be_mcc_wrb *wrb;
722  struct be_cmd_req_mac_query *req;
723  int status;
724 
725  spin_lock_bh(&adapter->mcc_lock);
726 
727  wrb = wrb_from_mccq(adapter);
728  if (!wrb) {
729  status = -EBUSY;
730  goto err;
731  }
732  req = embedded_payload(wrb);
733 
734  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
735  OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
736  req->type = MAC_ADDRESS_TYPE_NETWORK;
737  if (permanent) {
738  req->permanent = 1;
739  } else {
740  req->if_id = cpu_to_le16((u16) if_handle);
741  req->pmac_id = cpu_to_le32(pmac_id);
742  req->permanent = 0;
743  }
744 
745  status = be_mcc_notify_wait(adapter);
746  if (!status) {
747  struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
748  memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
749  }
750 
751 err:
752  spin_unlock_bh(&adapter->mcc_lock);
753  return status;
754 }
755 
756 /* Uses synchronous MCCQ */
757 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
758  u32 if_id, u32 *pmac_id, u32 domain)
759 {
760  struct be_mcc_wrb *wrb;
761  struct be_cmd_req_pmac_add *req;
762  int status;
763 
764  spin_lock_bh(&adapter->mcc_lock);
765 
766  wrb = wrb_from_mccq(adapter);
767  if (!wrb) {
768  status = -EBUSY;
769  goto err;
770  }
771  req = embedded_payload(wrb);
772 
773  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
774  OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
775 
776  req->hdr.domain = domain;
777  req->if_id = cpu_to_le32(if_id);
778  memcpy(req->mac_address, mac_addr, ETH_ALEN);
779 
780  status = be_mcc_notify_wait(adapter);
781  if (!status) {
782  struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
783  *pmac_id = le32_to_cpu(resp->pmac_id);
784  }
785 
786 err:
787  spin_unlock_bh(&adapter->mcc_lock);
788 
789  if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
790  status = -EPERM;
791 
792  return status;
793 }
794 
795 /* Uses synchronous MCCQ */
796 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
797 {
798  struct be_mcc_wrb *wrb;
799  struct be_cmd_req_pmac_del *req;
800  int status;
801 
802  if (pmac_id == -1)
803  return 0;
804 
805  spin_lock_bh(&adapter->mcc_lock);
806 
807  wrb = wrb_from_mccq(adapter);
808  if (!wrb) {
809  status = -EBUSY;
810  goto err;
811  }
812  req = embedded_payload(wrb);
813 
814  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
815  OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
816 
817  req->hdr.domain = dom;
818  req->if_id = cpu_to_le32(if_id);
819  req->pmac_id = cpu_to_le32(pmac_id);
820 
821  status = be_mcc_notify_wait(adapter);
822 
823 err:
824  spin_unlock_bh(&adapter->mcc_lock);
825  return status;
826 }
827 
828 /* Uses Mbox */
829 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
830  struct be_queue_info *eq, bool no_delay, int coalesce_wm)
831 {
832  struct be_mcc_wrb *wrb;
833  struct be_cmd_req_cq_create *req;
834  struct be_dma_mem *q_mem = &cq->dma_mem;
835  void *ctxt;
836  int status;
837 
838  if (mutex_lock_interruptible(&adapter->mbox_lock))
839  return -1;
840 
841  wrb = wrb_from_mbox(adapter);
842  req = embedded_payload(wrb);
843  ctxt = &req->context;
844 
845  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
846  OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
847 
848  req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
849  if (lancer_chip(adapter)) {
850  req->hdr.version = 2;
851  req->page_size = 1; /* 1 for 4K */
852  AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
853  no_delay);
854  AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
855  __ilog2_u32(cq->len/256));
856  AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
857  AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
858  ctxt, 1);
859  AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
860  ctxt, eq->id);
861  } else {
862  AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
863  coalesce_wm);
864  AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
865  ctxt, no_delay);
866  AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
867  __ilog2_u32(cq->len/256));
868  AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
869  AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
870  AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
871  }
872 
873  be_dws_cpu_to_le(ctxt, sizeof(req->context));
874 
875  be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
876 
877  status = be_mbox_notify_wait(adapter);
878  if (!status) {
879  struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
880  cq->id = le16_to_cpu(resp->cq_id);
881  cq->created = true;
882  }
883 
884  mutex_unlock(&adapter->mbox_lock);
885 
886  return status;
887 }
888 
889 static u32 be_encoded_q_len(int q_len)
890 {
891  u32 len_encoded = fls(q_len); /* log2(len) + 1 */
892  if (len_encoded == 16)
893  len_encoded = 0;
894  return len_encoded;
895 }
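/* E.g. q_len = 256 (fls = 9) encodes as 9, while the maximum length of
 * 32768 (fls = 16) wraps to the special value 0 -- presumably because
 * the hardware ring-size fields this feeds are only 4 bits wide.
 */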
896 
897 int be_cmd_mccq_ext_create(struct be_adapter *adapter,
898  struct be_queue_info *mccq,
899  struct be_queue_info *cq)
900 {
901  struct be_mcc_wrb *wrb;
902  struct be_cmd_req_mcc_ext_create *req;
903  struct be_dma_mem *q_mem = &mccq->dma_mem;
904  void *ctxt;
905  int status;
906 
907  if (mutex_lock_interruptible(&adapter->mbox_lock))
908  return -1;
909 
910  wrb = wrb_from_mbox(adapter);
911  req = embedded_payload(wrb);
912  ctxt = &req->context;
913 
914  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
915  OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
916 
917  req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
918  if (lancer_chip(adapter)) {
919  req->hdr.version = 1;
920  req->cq_id = cpu_to_le16(cq->id);
921 
922  AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
923  be_encoded_q_len(mccq->len));
924  AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
925  AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
926  ctxt, cq->id);
927  AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
928  ctxt, 1);
929 
930  } else {
931  AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
932  AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
933  be_encoded_q_len(mccq->len));
934  AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
935  }
936 
937  /* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
938  req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
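 /* 0x00000022 = (1 << 1) | (1 << 5): the bit positions match the async
  * event codes ASYNC_EVENT_CODE_LINK_STATE (1) and ASYNC_EVENT_CODE_GRP_5
  * (5) that is_link_state_evt()/is_grp5_evt() test for above.
  */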
939  be_dws_cpu_to_le(ctxt, sizeof(req->context));
940 
941  be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
942 
943  status = be_mbox_notify_wait(adapter);
944  if (!status) {
945  struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
946  mccq->id = le16_to_cpu(resp->id);
947  mccq->created = true;
948  }
949  mutex_unlock(&adapter->mbox_lock);
950 
951  return status;
952 }
953 
954 int be_cmd_mccq_org_create(struct be_adapter *adapter,
955  struct be_queue_info *mccq,
956  struct be_queue_info *cq)
957 {
958  struct be_mcc_wrb *wrb;
959  struct be_cmd_req_mcc_create *req;
960  struct be_dma_mem *q_mem = &mccq->dma_mem;
961  void *ctxt;
962  int status;
963 
964  if (mutex_lock_interruptible(&adapter->mbox_lock))
965  return -1;
966 
967  wrb = wrb_from_mbox(adapter);
968  req = embedded_payload(wrb);
969  ctxt = &req->context;
970 
971  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
972  OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
973 
974  req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
975 
976  AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
977  AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
978  be_encoded_q_len(mccq->len));
979  AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
980 
981  be_dws_cpu_to_le(ctxt, sizeof(req->context));
982 
983  be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
984 
985  status = be_mbox_notify_wait(adapter);
986  if (!status) {
987  struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
988  mccq->id = le16_to_cpu(resp->id);
989  mccq->created = true;
990  }
991 
992  mutex_unlock(&adapter->mbox_lock);
993  return status;
994 }
995 
996 int be_cmd_mccq_create(struct be_adapter *adapter,
997  struct be_queue_info *mccq,
998  struct be_queue_info *cq)
999 {
1000  int status;
1001 
1002  status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1003  if (status && !lancer_chip(adapter)) {
1004  dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1005  "or newer to avoid conflicting priorities between NIC "
1006  "and FCoE traffic");
1007  status = be_cmd_mccq_org_create(adapter, mccq, cq);
1008  }
1009  return status;
1010 }
1011 
1012 int be_cmd_txq_create(struct be_adapter *adapter,
1013  struct be_queue_info *txq,
1014  struct be_queue_info *cq)
1015 {
1016  struct be_mcc_wrb *wrb;
1017  struct be_cmd_req_eth_tx_create *req;
1018  struct be_dma_mem *q_mem = &txq->dma_mem;
1019  void *ctxt;
1020  int status;
1021 
1022  spin_lock_bh(&adapter->mcc_lock);
1023 
1024  wrb = wrb_from_mccq(adapter);
1025  if (!wrb) {
1026  status = -EBUSY;
1027  goto err;
1028  }
1029 
1030  req = embedded_payload(wrb);
1031  ctxt = &req->context;
1032 
1033  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1034  OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
1035 
1036  if (lancer_chip(adapter)) {
1037  req->hdr.version = 1;
1038  AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
1039  adapter->if_handle);
1040  }
1041 
1042  req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1043  req->ulp_num = BE_ULP1_NUM;
1044  req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1045 
1046  AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
1047  be_encoded_q_len(txq->len));
1048  AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
1049  AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
1050 
1051  be_dws_cpu_to_le(ctxt, sizeof(req->context));
1052 
1053  be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1054 
1055  status = be_mcc_notify_wait(adapter);
1056  if (!status) {
1057  struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
1058  txq->id = le16_to_cpu(resp->cid);
1059  txq->created = true;
1060  }
1061 
1062 err:
1063  spin_unlock_bh(&adapter->mcc_lock);
1064 
1065  return status;
1066 }
1067 
1068 /* Uses MCC */
1069 int be_cmd_rxq_create(struct be_adapter *adapter,
1070  struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1071  u32 if_id, u32 rss, u8 *rss_id)
1072 {
1073  struct be_mcc_wrb *wrb;
1074  struct be_cmd_req_eth_rx_create *req;
1075  struct be_dma_mem *q_mem = &rxq->dma_mem;
1076  int status;
1077 
1078  spin_lock_bh(&adapter->mcc_lock);
1079 
1080  wrb = wrb_from_mccq(adapter);
1081  if (!wrb) {
1082  status = -EBUSY;
1083  goto err;
1084  }
1085  req = embedded_payload(wrb);
1086 
1087  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1088  OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1089 
1090  req->cq_id = cpu_to_le16(cq_id);
1091  req->frag_size = fls(frag_size) - 1;
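 /* frag_size is passed to FW as a log2: e.g. 2048-byte fragments give
  * fls(2048) - 1 = 11. Callers are expected to pass a power of two.
  */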
1092  req->num_pages = 2;
1093  be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1094  req->interface_id = cpu_to_le32(if_id);
1095  req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1096  req->rss_queue = cpu_to_le32(rss);
1097 
1098  status = be_mcc_notify_wait(adapter);
1099  if (!status) {
1100  struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1101  rxq->id = le16_to_cpu(resp->id);
1102  rxq->created = true;
1103  *rss_id = resp->rss_id;
1104  }
1105 
1106 err:
1107  spin_unlock_bh(&adapter->mcc_lock);
1108  return status;
1109 }
1110 
1111 /* Generic destroyer function for all types of queues
1112  * Uses Mbox
1113  */
1114 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1115  int queue_type)
1116 {
1117  struct be_mcc_wrb *wrb;
1118  struct be_cmd_req_q_destroy *req;
1119  u8 subsys = 0, opcode = 0;
1120  int status;
1121 
1122  if (mutex_lock_interruptible(&adapter->mbox_lock))
1123  return -1;
1124 
1125  wrb = wrb_from_mbox(adapter);
1126  req = embedded_payload(wrb);
1127 
1128  switch (queue_type) {
1129  case QTYPE_EQ:
1130  subsys = CMD_SUBSYSTEM_COMMON;
1131  opcode = OPCODE_COMMON_EQ_DESTROY;
1132  break;
1133  case QTYPE_CQ:
1134  subsys = CMD_SUBSYSTEM_COMMON;
1135  opcode = OPCODE_COMMON_CQ_DESTROY;
1136  break;
1137  case QTYPE_TXQ:
1138  subsys = CMD_SUBSYSTEM_ETH;
1139  opcode = OPCODE_ETH_TX_DESTROY;
1140  break;
1141  case QTYPE_RXQ:
1142  subsys = CMD_SUBSYSTEM_ETH;
1143  opcode = OPCODE_ETH_RX_DESTROY;
1144  break;
1145  case QTYPE_MCCQ:
1146  subsys = CMD_SUBSYSTEM_COMMON;
1147  opcode = OPCODE_COMMON_MCC_DESTROY;
1148  break;
1149  default:
1150  BUG();
1151  }
1152 
1153  be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1154  NULL);
1155  req->id = cpu_to_le16(q->id);
1156 
1157  status = be_mbox_notify_wait(adapter);
1158  if (!status)
1159  q->created = false;
1160 
1161  mutex_unlock(&adapter->mbox_lock);
1162  return status;
1163 }
1164 
1165 /* Uses MCC */
1166 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1167 {
1168  struct be_mcc_wrb *wrb;
1169  struct be_cmd_req_q_destroy *req;
1170  int status;
1171 
1172  spin_lock_bh(&adapter->mcc_lock);
1173 
1174  wrb = wrb_from_mccq(adapter);
1175  if (!wrb) {
1176  status = -EBUSY;
1177  goto err;
1178  }
1179  req = embedded_payload(wrb);
1180 
1181  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1182  OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1183  req->id = cpu_to_le16(q->id);
1184 
1185  status = be_mcc_notify_wait(adapter);
1186  if (!status)
1187  q->created = false;
1188 
1189 err:
1190  spin_unlock_bh(&adapter->mcc_lock);
1191  return status;
1192 }
1193 
1194 /* Create an rx filtering policy configuration on an i/f
1195  * Uses MCCQ
1196  */
1197 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1198  u32 *if_handle, u32 domain)
1199 {
1200  struct be_mcc_wrb *wrb;
1201  struct be_cmd_req_if_create *req;
1202  int status;
1203 
1204  spin_lock_bh(&adapter->mcc_lock);
1205 
1206  wrb = wrb_from_mccq(adapter);
1207  if (!wrb) {
1208  status = -EBUSY;
1209  goto err;
1210  }
1211  req = embedded_payload(wrb);
1212 
1213  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1214  OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
1215  req->hdr.domain = domain;
1216  req->capability_flags = cpu_to_le32(cap_flags);
1217  req->enable_flags = cpu_to_le32(en_flags);
1218 
1219  req->pmac_invalid = true;
1220 
1221  status = be_mcc_notify_wait(adapter);
1222  if (!status) {
1223  struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1224  *if_handle = le32_to_cpu(resp->interface_id);
1225  }
1226 
1227 err:
1228  spin_unlock_bh(&adapter->mcc_lock);
1229  return status;
1230 }
1231 
1232 /* Uses MCCQ */
1233 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1234 {
1235  struct be_mcc_wrb *wrb;
1236  struct be_cmd_req_if_destroy *req;
1237  int status;
1238 
1239  if (interface_id == -1)
1240  return 0;
1241 
1242  spin_lock_bh(&adapter->mcc_lock);
1243 
1244  wrb = wrb_from_mccq(adapter);
1245  if (!wrb) {
1246  status = -EBUSY;
1247  goto err;
1248  }
1249  req = embedded_payload(wrb);
1250 
1251  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1252  OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1253  req->hdr.domain = domain;
1254  req->interface_id = cpu_to_le32(interface_id);
1255 
1256  status = be_mcc_notify_wait(adapter);
1257 err:
1258  spin_unlock_bh(&adapter->mcc_lock);
1259  return status;
1260 }
1261 
1262 /* Get stats is a non embedded command: the request is not embedded inside
1263  * WRB but is a separate dma memory block
1264  * Uses asynchronous MCC
1265  */
1266 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1267 {
1268  struct be_mcc_wrb *wrb;
1269  struct be_cmd_req_hdr *hdr;
1270  int status = 0;
1271 
1272  spin_lock_bh(&adapter->mcc_lock);
1273 
1274  wrb = wrb_from_mccq(adapter);
1275  if (!wrb) {
1276  status = -EBUSY;
1277  goto err;
1278  }
1279  hdr = nonemb_cmd->va;
1280 
1281  be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1282  OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1283 
1284  if (adapter->generation == BE_GEN3)
1285  hdr->version = 1;
1286 
1287  be_mcc_notify(adapter);
1288  adapter->stats_cmd_sent = true;
1289 
1290 err:
1291  spin_unlock_bh(&adapter->mcc_lock);
1292  return status;
1293 }
1294 
1295 /* Lancer Stats */
1296 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1297  struct be_dma_mem *nonemb_cmd)
1298 {
1299 
1300  struct be_mcc_wrb *wrb;
1301  struct lancer_cmd_req_pport_stats *req;
1302  int status = 0;
1303 
1304  spin_lock_bh(&adapter->mcc_lock);
1305 
1306  wrb = wrb_from_mccq(adapter);
1307  if (!wrb) {
1308  status = -EBUSY;
1309  goto err;
1310  }
1311  req = nonemb_cmd->va;
1312 
1313  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1314  OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1315  nonemb_cmd);
1316 
1317  req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1318  req->cmd_params.params.reset_stats = 0;
1319 
1320  be_mcc_notify(adapter);
1321  adapter->stats_cmd_sent = true;
1322 
1323 err:
1324  spin_unlock_bh(&adapter->mcc_lock);
1325  return status;
1326 }
1327 
1328 static int be_mac_to_link_speed(int mac_speed)
1329 {
1330  switch (mac_speed) {
1331  case PHY_LINK_SPEED_ZERO:
1332  return 0;
1333  case PHY_LINK_SPEED_10MBPS:
1334  return 10;
1335  case PHY_LINK_SPEED_100MBPS:
1336  return 100;
1337  case PHY_LINK_SPEED_1GBPS:
1338  return 1000;
1339  case PHY_LINK_SPEED_10GBPS:
1340  return 10000;
1341  }
1342  return 0;
1343 }
1344 
1345 /* Uses synchronous mcc
1346  * Returns link_speed in Mbps
1347  */
1348 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1349  u8 *link_status, u32 dom)
1350 {
1351  struct be_mcc_wrb *wrb;
1352  struct be_cmd_req_link_status *req;
1353  int status;
1354 
1355  spin_lock_bh(&adapter->mcc_lock);
1356 
1357  if (link_status)
1358  *link_status = LINK_DOWN;
1359 
1360  wrb = wrb_from_mccq(adapter);
1361  if (!wrb) {
1362  status = -EBUSY;
1363  goto err;
1364  }
1365  req = embedded_payload(wrb);
1366 
1367  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1368  OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1369 
1370  if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
1371  req->hdr.version = 1;
1372 
1373  req->hdr.domain = dom;
1374 
1375  status = be_mcc_notify_wait(adapter);
1376  if (!status) {
1377  struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1378  if (link_speed) {
1379  *link_speed = resp->link_speed ?
1380  le16_to_cpu(resp->link_speed) * 10 :
1381  be_mac_to_link_speed(resp->mac_speed);
1382 
1383  if (!resp->logical_link_status)
1384  *link_speed = 0;
1385  }
1386  if (link_status)
1387  *link_status = resp->logical_link_status;
1388  }
1389 
1390 err:
1391  spin_unlock_bh(&adapter->mcc_lock);
1392  return status;
1393 }
1394 
1395 /* Uses synchronous mcc */
1396 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1397 {
1398  struct be_mcc_wrb *wrb;
1399  struct be_cmd_req_get_cntl_addnl_attribs *req;
1400  int status;
1401 
1402  spin_lock_bh(&adapter->mcc_lock);
1403 
1404  wrb = wrb_from_mccq(adapter);
1405  if (!wrb) {
1406  status = -EBUSY;
1407  goto err;
1408  }
1409  req = embedded_payload(wrb);
1410 
1411  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1412  OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1413  wrb, NULL);
1414 
1415  be_mcc_notify(adapter);
1416 
1417 err:
1418  spin_unlock_bh(&adapter->mcc_lock);
1419  return status;
1420 }
1421 
1422 /* Uses synchronous mcc */
1423 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1424 {
1425  struct be_mcc_wrb *wrb;
1426  struct be_cmd_req_get_fat *req;
1427  int status;
1428 
1429  spin_lock_bh(&adapter->mcc_lock);
1430 
1431  wrb = wrb_from_mccq(adapter);
1432  if (!wrb) {
1433  status = -EBUSY;
1434  goto err;
1435  }
1436  req = embedded_payload(wrb);
1437 
1438  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1439  OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1440  req->fat_operation = cpu_to_le32(QUERY_FAT);
1441  status = be_mcc_notify_wait(adapter);
1442  if (!status) {
1443  struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1444  if (log_size && resp->log_size)
1445  *log_size = le32_to_cpu(resp->log_size) -
1446  sizeof(u32);
1447  }
1448 err:
1449  spin_unlock_bh(&adapter->mcc_lock);
1450  return status;
1451 }
1452 
1453 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1454 {
1455  struct be_dma_mem get_fat_cmd;
1456  struct be_mcc_wrb *wrb;
1457  struct be_cmd_req_get_fat *req;
1458  u32 offset = 0, total_size, buf_size,
1459  log_offset = sizeof(u32), payload_len;
1460  int status;
1461 
1462  if (buf_len == 0)
1463  return;
1464 
1465  total_size = buf_len;
1466 
1467  get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1468  get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1469  get_fat_cmd.size,
1470  &get_fat_cmd.dma);
1471  if (!get_fat_cmd.va) {
1472  status = -ENOMEM;
1473  dev_err(&adapter->pdev->dev,
1474  "Memory allocation failure while retrieving FAT data\n");
1475  return;
1476  }
1477 
1478  spin_lock_bh(&adapter->mcc_lock);
1479 
1480  while (total_size) {
1481  buf_size = min(total_size, (u32)60*1024);
1482  total_size -= buf_size;
1483 
1484  wrb = wrb_from_mccq(adapter);
1485  if (!wrb) {
1486  status = -EBUSY;
1487  goto err;
1488  }
1489  req = get_fat_cmd.va;
1490 
1491  payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1492  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1493  OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1494  &get_fat_cmd);
1495 
1496  req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1497  req->read_log_offset = cpu_to_le32(log_offset);
1498  req->read_log_length = cpu_to_le32(buf_size);
1499  req->data_buffer_size = cpu_to_le32(buf_size);
1500 
1501  status = be_mcc_notify_wait(adapter);
1502  if (!status) {
1503  struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1504  memcpy(buf + offset,
1505  resp->data_buffer,
1506  le32_to_cpu(resp->read_log_length));
1507  } else {
1508  dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1509  goto err;
1510  }
1511  offset += buf_size;
1512  log_offset += buf_size;
1513  }
1514 err:
1515  pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1516  get_fat_cmd.va,
1517  get_fat_cmd.dma);
1518  spin_unlock_bh(&adapter->mcc_lock);
1519 }
1520 
1521 /* Uses synchronous mcc */
1522 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1523  char *fw_on_flash)
1524 {
1525  struct be_mcc_wrb *wrb;
1526  struct be_cmd_req_get_fw_version *req;
1527  int status;
1528 
1529  spin_lock_bh(&adapter->mcc_lock);
1530 
1531  wrb = wrb_from_mccq(adapter);
1532  if (!wrb) {
1533  status = -EBUSY;
1534  goto err;
1535  }
1536 
1537  req = embedded_payload(wrb);
1538 
1539  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1540  OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1541  status = be_mcc_notify_wait(adapter);
1542  if (!status) {
1543  struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1544  strcpy(fw_ver, resp->firmware_version_string);
1545  if (fw_on_flash)
1546  strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1547  }
1548 err:
1549  spin_unlock_bh(&adapter->mcc_lock);
1550  return status;
1551 }
1552 
1553 /* set the EQ delay interval of an EQ to specified value
1554  * Uses async mcc
1555  */
1556 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1557 {
1558  struct be_mcc_wrb *wrb;
1559  struct be_cmd_req_modify_eq_delay *req;
1560  int status = 0;
1561 
1562  spin_lock_bh(&adapter->mcc_lock);
1563 
1564  wrb = wrb_from_mccq(adapter);
1565  if (!wrb) {
1566  status = -EBUSY;
1567  goto err;
1568  }
1569  req = embedded_payload(wrb);
1570 
1571  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1572  OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1573 
1574  req->num_eq = cpu_to_le32(1);
1575  req->delay[0].eq_id = cpu_to_le32(eq_id);
1576  req->delay[0].phase = 0;
1577  req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1578 
1579  be_mcc_notify(adapter);
1580 
1581 err:
1582  spin_unlock_bh(&adapter->mcc_lock);
1583  return status;
1584 }
1585 
1586 /* Uses synchronous mcc */
1587 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1588  u32 num, bool untagged, bool promiscuous)
1589 {
1590  struct be_mcc_wrb *wrb;
1591  struct be_cmd_req_vlan_config *req;
1592  int status;
1593 
1594  spin_lock_bh(&adapter->mcc_lock);
1595 
1596  wrb = wrb_from_mccq(adapter);
1597  if (!wrb) {
1598  status = -EBUSY;
1599  goto err;
1600  }
1601  req = embedded_payload(wrb);
1602 
1603  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1604  OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1605 
1606  req->interface_id = if_id;
1607  req->promiscuous = promiscuous;
1608  req->untagged = untagged;
1609  req->num_vlan = num;
1610  if (!promiscuous) {
1611  memcpy(req->normal_vlan, vtag_array,
1612  req->num_vlan * sizeof(vtag_array[0]));
1613  }
1614 
1615  status = be_mcc_notify_wait(adapter);
1616 
1617 err:
1618  spin_unlock_bh(&adapter->mcc_lock);
1619  return status;
1620 }
1621 
1622 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1623 {
1624  struct be_mcc_wrb *wrb;
1625  struct be_dma_mem *mem = &adapter->rx_filter;
1626  struct be_cmd_req_rx_filter *req = mem->va;
1627  int status;
1628 
1629  spin_lock_bh(&adapter->mcc_lock);
1630 
1631  wrb = wrb_from_mccq(adapter);
1632  if (!wrb) {
1633  status = -EBUSY;
1634  goto err;
1635  }
1636  memset(req, 0, sizeof(*req));
1637  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1638  OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1639  wrb, mem);
1640 
1641  req->if_id = cpu_to_le32(adapter->if_handle);
1642  if (flags & IFF_PROMISC) {
1643  req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1644  BE_IF_FLAGS_VLAN_PROMISCUOUS);
1645  if (value == ON)
1646  req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1647  BE_IF_FLAGS_VLAN_PROMISCUOUS);
1648  } else if (flags & IFF_ALLMULTI) {
1649  req->if_flags_mask = req->if_flags =
1650  cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1651  } else {
1652  struct netdev_hw_addr *ha;
1653  int i = 0;
1654 
1655  req->if_flags_mask = req->if_flags =
1656  cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1657 
1658  /* Reset mcast promisc mode if already set by setting mask
1659  * and not setting flags field
1660  */
1661  if (!lancer_chip(adapter) || be_physfn(adapter))
1662  req->if_flags_mask |=
1663  cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1664 
1665  req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1666  netdev_for_each_mc_addr(ha, adapter->netdev)
1667  memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1668  }
1669 
1670  status = be_mcc_notify_wait(adapter);
1671 err:
1672  spin_unlock_bh(&adapter->mcc_lock);
1673  return status;
1674 }
1675 
1676 /* Uses synchronous mcc */
1677 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1678 {
1679  struct be_mcc_wrb *wrb;
1680  struct be_cmd_req_set_flow_control *req;
1681  int status;
1682 
1683  spin_lock_bh(&adapter->mcc_lock);
1684 
1685  wrb = wrb_from_mccq(adapter);
1686  if (!wrb) {
1687  status = -EBUSY;
1688  goto err;
1689  }
1690  req = embedded_payload(wrb);
1691 
1692  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1693  OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1694 
1695  req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1696  req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1697 
1698  status = be_mcc_notify_wait(adapter);
1699 
1700 err:
1701  spin_unlock_bh(&adapter->mcc_lock);
1702  return status;
1703 }
1704 
1705 /* Uses sync mcc */
1706 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1707 {
1708  struct be_mcc_wrb *wrb;
1709  struct be_cmd_req_get_flow_control *req;
1710  int status;
1711 
1712  spin_lock_bh(&adapter->mcc_lock);
1713 
1714  wrb = wrb_from_mccq(adapter);
1715  if (!wrb) {
1716  status = -EBUSY;
1717  goto err;
1718  }
1719  req = embedded_payload(wrb);
1720 
1721  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1722  OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1723 
1724  status = be_mcc_notify_wait(adapter);
1725  if (!status) {
1726  struct be_cmd_resp_get_flow_control *resp =
1727  embedded_payload(wrb);
1728  *tx_fc = le16_to_cpu(resp->tx_flow_control);
1729  *rx_fc = le16_to_cpu(resp->rx_flow_control);
1730  }
1731 
1732 err:
1733  spin_unlock_bh(&adapter->mcc_lock);
1734  return status;
1735 }
1736 
1737 /* Uses mbox */
1738 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1739  u32 *mode, u32 *caps)
1740 {
1741  struct be_mcc_wrb *wrb;
1742  struct be_cmd_req_query_fw_cfg *req;
1743  int status;
1744 
1745  if (mutex_lock_interruptible(&adapter->mbox_lock))
1746  return -1;
1747 
1748  wrb = wrb_from_mbox(adapter);
1749  req = embedded_payload(wrb);
1750 
1751  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1752  OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1753 
1754  status = be_mbox_notify_wait(adapter);
1755  if (!status) {
1756  struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1757  *port_num = le32_to_cpu(resp->phys_port);
1758  *mode = le32_to_cpu(resp->function_mode);
1759  *caps = le32_to_cpu(resp->function_caps);
1760  }
1761 
1762  mutex_unlock(&adapter->mbox_lock);
1763  return status;
1764 }
1765 
1766 /* Uses mbox */
1767 int be_cmd_reset_function(struct be_adapter *adapter)
1768 {
1769  struct be_mcc_wrb *wrb;
1770  struct be_cmd_req_hdr *req;
1771  int status;
1772 
1773  if (lancer_chip(adapter)) {
1774  status = lancer_wait_ready(adapter);
1775  if (!status) {
1776  iowrite32(SLI_PORT_CONTROL_IP_MASK,
1777  adapter->db + SLIPORT_CONTROL_OFFSET);
1778  status = lancer_test_and_set_rdy_state(adapter);
1779  }
1780  if (status) {
1781  dev_err(&adapter->pdev->dev,
1782  "Adapter in non recoverable error\n");
1783  }
1784  return status;
1785  }
1786 
1787  if (mutex_lock_interruptible(&adapter->mbox_lock))
1788  return -1;
1789 
1790  wrb = wrb_from_mbox(adapter);
1791  req = embedded_payload(wrb);
1792 
1793  be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1794  OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1795 
1796  status = be_mbox_notify_wait(adapter);
1797 
1798  mutex_unlock(&adapter->mbox_lock);
1799  return status;
1800 }
1801 
1802 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1803 {
1804  struct be_mcc_wrb *wrb;
1805  struct be_cmd_req_rss_config *req;
1806  u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1807  0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1808  0x3ea83c02, 0x4a110304};
1809  int status;
1810 
1811  if (mutex_lock_interruptible(&adapter->mbox_lock))
1812  return -1;
1813 
1814  wrb = wrb_from_mbox(adapter);
1815  req = embedded_payload(wrb);
1816 
1817  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1818  OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
1819 
1820  req->if_id = cpu_to_le32(adapter->if_handle);
1821  req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
1822  RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
1823 
1824  if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
1825  req->hdr.version = 1;
1826  req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
1827  RSS_ENABLE_UDP_IPV6);
1828  }
1829 
1830  req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1831  memcpy(req->cpu_table, rsstable, table_size);
1832  memcpy(req->hash, myhash, sizeof(myhash));
1833  be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1834 
1835  status = be_mbox_notify_wait(adapter);
1836 
1837  mutex_unlock(&adapter->mbox_lock);
1838  return status;
1839 }
1840 
1841 /* Uses sync mcc */
1842 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1843  u8 bcn, u8 sts, u8 state)
1844 {
1845  struct be_mcc_wrb *wrb;
1846  struct be_cmd_req_enable_disable_beacon *req;
1847  int status;
1848 
1849  spin_lock_bh(&adapter->mcc_lock);
1850 
1851  wrb = wrb_from_mccq(adapter);
1852  if (!wrb) {
1853  status = -EBUSY;
1854  goto err;
1855  }
1856  req = embedded_payload(wrb);
1857 
1858  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1859  OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1860 
1861  req->port_num = port_num;
1862  req->beacon_state = state;
1863  req->beacon_duration = bcn;
1864  req->status_duration = sts;
1865 
1866  status = be_mcc_notify_wait(adapter);
1867 
1868 err:
1869  spin_unlock_bh(&adapter->mcc_lock);
1870  return status;
1871 }
1872 
1873 /* Uses sync mcc */
1874 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1875 {
1876  struct be_mcc_wrb *wrb;
1877  struct be_cmd_req_get_beacon_state *req;
1878  int status;
1879 
1880  spin_lock_bh(&adapter->mcc_lock);
1881 
1882  wrb = wrb_from_mccq(adapter);
1883  if (!wrb) {
1884  status = -EBUSY;
1885  goto err;
1886  }
1887  req = embedded_payload(wrb);
1888 
1889  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1890  OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
1891 
1892  req->port_num = port_num;
1893 
1894  status = be_mcc_notify_wait(adapter);
1895  if (!status) {
1896  struct be_cmd_resp_get_beacon_state *resp =
1897  embedded_payload(wrb);
1898  *state = resp->beacon_state;
1899  }
1900 
1901 err:
1902  spin_unlock_bh(&adapter->mcc_lock);
1903  return status;
1904 }
1905 
1906 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1907  u32 data_size, u32 data_offset,
1908  const char *obj_name, u32 *data_written,
1909  u8 *change_status, u8 *addn_status)
1910 {
1911  struct be_mcc_wrb *wrb;
1912  struct lancer_cmd_req_write_object *req;
1913  struct lancer_cmd_resp_write_object *resp;
1914  void *ctxt = NULL;
1915  int status;
1916 
1917  spin_lock_bh(&adapter->mcc_lock);
1918  adapter->flash_status = 0;
1919 
1920  wrb = wrb_from_mccq(adapter);
1921  if (!wrb) {
1922  status = -EBUSY;
1923  goto err_unlock;
1924  }
1925 
1926  req = embedded_payload(wrb);
1927 
1928  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1929  OPCODE_COMMON_WRITE_OBJECT,
1930  sizeof(struct lancer_cmd_req_write_object), wrb,
1931  NULL);
1932 
1933  ctxt = &req->context;
1934  AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1935  write_length, ctxt, data_size);
1936 
1937  if (data_size == 0)
1938  AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1939  eof, ctxt, 1);
1940  else
1941  AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1942  eof, ctxt, 0);
1943 
1944  be_dws_cpu_to_le(ctxt, sizeof(req->context));
1945  req->write_offset = cpu_to_le32(data_offset);
1946  strcpy(req->object_name, obj_name);
1947  req->descriptor_count = cpu_to_le32(1);
1948  req->buf_len = cpu_to_le32(data_size);
1949  req->addr_low = cpu_to_le32((cmd->dma +
1950  sizeof(struct lancer_cmd_req_write_object))
1951  & 0xFFFFFFFF);
1952  req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
1953  sizeof(struct lancer_cmd_req_write_object)));
1954 
1955  be_mcc_notify(adapter);
1956  spin_unlock_bh(&adapter->mcc_lock);
1957 
1958  if (!wait_for_completion_timeout(&adapter->flash_compl,
1959  msecs_to_jiffies(30000)))
1960  status = -1;
1961  else
1962  status = adapter->flash_status;
1963 
1964  resp = embedded_payload(wrb);
1965  if (!status) {
1966  *data_written = le32_to_cpu(resp->actual_write_len);
1967  *change_status = resp->change_status;
1968  } else {
1969  *addn_status = resp->additional_status;
1970  }
1971 
1972  return status;
1973 
1974 err_unlock:
1975  spin_unlock_bh(&adapter->mcc_lock);
1976  return status;
1977 }
1978 
1979 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1980  u32 data_size, u32 data_offset, const char *obj_name,
1981  u32 *data_read, u32 *eof, u8 *addn_status)
1982 {
1983  struct be_mcc_wrb *wrb;
1984  struct lancer_cmd_req_read_object *req;
1985  struct lancer_cmd_resp_read_object *resp;
1986  int status;
1987 
1988  spin_lock_bh(&adapter->mcc_lock);
1989 
1990  wrb = wrb_from_mccq(adapter);
1991  if (!wrb) {
1992  status = -EBUSY;
1993  goto err_unlock;
1994  }
1995 
1996  req = embedded_payload(wrb);
1997 
1998  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1999  OPCODE_COMMON_READ_OBJECT,
2000  sizeof(struct lancer_cmd_req_read_object), wrb,
2001  NULL);
2002 
2003  req->desired_read_len = cpu_to_le32(data_size);
2004  req->read_offset = cpu_to_le32(data_offset);
2005  strcpy(req->object_name, obj_name);
2006  req->descriptor_count = cpu_to_le32(1);
2007  req->buf_len = cpu_to_le32(data_size);
2008  req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2009  req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2010 
2011  status = be_mcc_notify_wait(adapter);
2012 
2013  resp = embedded_payload(wrb);
2014  if (!status) {
2015  *data_read = le32_to_cpu(resp->actual_read_len);
2016  *eof = le32_to_cpu(resp->eof);
2017  } else {
2018  *addn_status = resp->additional_status;
2019  }
2020 
2021 err_unlock:
2022  spin_unlock_bh(&adapter->mcc_lock);
2023  return status;
2024 }
2025 
2026 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2027  u32 flash_type, u32 flash_opcode, u32 buf_size)
2028 {
2029  struct be_mcc_wrb *wrb;
2030  struct be_cmd_write_flashrom *req;
2031  int status;
2032 
2033  spin_lock_bh(&adapter->mcc_lock);
2034  adapter->flash_status = 0;
2035 
2036  wrb = wrb_from_mccq(adapter);
2037  if (!wrb) {
2038  status = -EBUSY;
2039  goto err_unlock;
2040  }
2041  req = cmd->va;
2042 
2043  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2044  OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
2045 
2046  req->params.op_type = cpu_to_le32(flash_type);
2047  req->params.op_code = cpu_to_le32(flash_opcode);
2048  req->params.data_buf_size = cpu_to_le32(buf_size);
2049 
2050  be_mcc_notify(adapter);
2051  spin_unlock_bh(&adapter->mcc_lock);
2052 
2053  if (!wait_for_completion_timeout(&adapter->flash_compl,
2054  msecs_to_jiffies(40000)))
2055  status = -1;
2056  else
2057  status = adapter->flash_status;
2058 
2059  return status;
2060 
2061 err_unlock:
2062  spin_unlock_bh(&adapter->mcc_lock);
2063  return status;
2064 }
2065 
2066 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2067  int offset)
2068 {
2069  struct be_mcc_wrb *wrb;
2070  struct be_cmd_write_flashrom *req;
2071  int status;
2072 
2073  spin_lock_bh(&adapter->mcc_lock);
2074 
2075  wrb = wrb_from_mccq(adapter);
2076  if (!wrb) {
2077  status = -EBUSY;
2078  goto err;
2079  }
2080  req = embedded_payload(wrb);
2081 
2082  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2083  OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
2084 
2085  req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2086  req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2087  req->params.offset = cpu_to_le32(offset);
2088  req->params.data_buf_size = cpu_to_le32(0x4);
2089 
2090  status = be_mcc_notify_wait(adapter);
2091  if (!status)
2092  memcpy(flashed_crc, req->params.data_buf, 4);
2093 
2094 err:
2095  spin_unlock_bh(&adapter->mcc_lock);
2096  return status;
2097 }
2098 
2099 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2100  struct be_dma_mem *nonemb_cmd)
2101 {
2102  struct be_mcc_wrb *wrb;
2103  struct be_cmd_req_acpi_wol_magic_config *req;
2104  int status;
2105 
2106  spin_lock_bh(&adapter->mcc_lock);
2107 
2108  wrb = wrb_from_mccq(adapter);
2109  if (!wrb) {
2110  status = -EBUSY;
2111  goto err;
2112  }
2113  req = nonemb_cmd->va;
2114 
2115  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2116  OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2117  nonemb_cmd);
2118  memcpy(req->magic_mac, mac, ETH_ALEN);
2119 
2120  status = be_mcc_notify_wait(adapter);
2121 
2122 err:
2123  spin_unlock_bh(&adapter->mcc_lock);
2124  return status;
2125 }
2126 
2127 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2128  u8 loopback_type, u8 enable)
2129 {
2130  struct be_mcc_wrb *wrb;
2131  struct be_cmd_req_set_lmode *req;
2132  int status;
2133 
2134  spin_lock_bh(&adapter->mcc_lock);
2135 
2136  wrb = wrb_from_mccq(adapter);
2137  if (!wrb) {
2138  status = -EBUSY;
2139  goto err;
2140  }
2141 
2142  req = embedded_payload(wrb);
2143 
2144  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2145  OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2146  NULL);
2147 
2148  req->src_port = port_num;
2149  req->dest_port = port_num;
2150  req->loopback_type = loopback_type;
2151  req->loopback_state = enable;
2152 
2153  status = be_mcc_notify_wait(adapter);
2154 err:
2155  spin_unlock_bh(&adapter->mcc_lock);
2156  return status;
2157 }
2158 
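/* Asks firmware to run a loopback test: @num_pkts packets of @pkt_size bytes
 * carrying @pattern are sent and checked on @port_num. On MCC success the
 * firmware's own pass/fail result (resp->status) is returned, so a zero MCC
 * status alone does not mean the test passed.
 *
 * A rough usage sketch (the loopback-type constants such as BE_MAC_LOOPBACK
 * and BE_NO_LOOPBACK live in the caller, e.g. the ethtool self-test code,
 * not in this file):
 *
 *	be_cmd_set_loopback(adapter, port, BE_MAC_LOOPBACK, 1);
 *	status = be_cmd_loopback_test(adapter, port, BE_MAC_LOOPBACK,
 *				      1500, 2, 0xff);
 *	be_cmd_set_loopback(adapter, port, BE_NO_LOOPBACK, 1);
 */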
2159 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2160  u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2161 {
2162  struct be_mcc_wrb *wrb;
2163  struct be_cmd_req_loopback_test *req;
2164  int status;
2165 
2166  spin_lock_bh(&adapter->mcc_lock);
2167 
2168  wrb = wrb_from_mccq(adapter);
2169  if (!wrb) {
2170  status = -EBUSY;
2171  goto err;
2172  }
2173 
2174  req = embedded_payload(wrb);
2175 
2176  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2177  OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2178  req->hdr.timeout = cpu_to_le32(4);
2179 
2180  req->pattern = cpu_to_le64(pattern);
2181  req->src_port = cpu_to_le32(port_num);
2182  req->dest_port = cpu_to_le32(port_num);
2183  req->pkt_size = cpu_to_le32(pkt_size);
2184  req->num_pkts = cpu_to_le32(num_pkts);
2185  req->loopback_type = cpu_to_le32(loopback_type);
2186 
2187  status = be_mcc_notify_wait(adapter);
2188  if (!status) {
2189  struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2190  status = le32_to_cpu(resp->status);
2191  }
2192 
2193 err:
2194  spin_unlock_bh(&adapter->mcc_lock);
2195  return status;
2196 }
2197 
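/* DMA loopback test of host memory: the 64-bit @pattern is replicated byte
 * by byte into snd_buff (j cycles 0..7 to walk the pattern), DMA'd to the
 * adapter and echoed back, and rcv_buff is memcmp'd against the original;
 * any mismatch or reported send error fails the test.
 */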
2198 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2199  u32 byte_cnt, struct be_dma_mem *cmd)
2200 {
2201  struct be_mcc_wrb *wrb;
2202  struct be_cmd_req_ddrdma_test *req;
2203  int status;
2204  int i, j = 0;
2205 
2206  spin_lock_bh(&adapter->mcc_lock);
2207 
2208  wrb = wrb_from_mccq(adapter);
2209  if (!wrb) {
2210  status = -EBUSY;
2211  goto err;
2212  }
2213  req = cmd->va;
2214  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2215  OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2216 
2217  req->pattern = cpu_to_le64(pattern);
2218  req->byte_count = cpu_to_le32(byte_cnt);
2219  for (i = 0; i < byte_cnt; i++) {
2220  req->snd_buff[i] = (u8)(pattern >> (j*8));
2221  j++;
2222  if (j > 7)
2223  j = 0;
2224  }
2225 
2226  status = be_mcc_notify_wait(adapter);
2227 
2228  if (!status) {
2229  struct be_cmd_resp_ddrdma_test *resp;
2230  resp = cmd->va;
2231  if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2232  resp->snd_err) {
2233  status = -1;
2234  }
2235  }
2236 
2237 err:
2238  spin_unlock_bh(&adapter->mcc_lock);
2239  return status;
2240 }
2241 
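/* Reads the adapter's SEEPROM contents into the caller-supplied non-embedded
 * DMA buffer @nonemb_cmd.
 */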
2242 int be_cmd_seeprom_read(struct be_adapter *adapter,
2243  struct be_dma_mem *nonemb_cmd)
2244 {
2245  struct be_mcc_wrb *wrb;
2246  struct be_cmd_req_seeprom_read *req;
2247  struct be_sge *sge;
2248  int status;
2249 
2250  spin_lock_bh(&adapter->mcc_lock);
2251 
2252  wrb = wrb_from_mccq(adapter);
2253  if (!wrb) {
2254  status = -EBUSY;
2255  goto err;
2256  }
2257  req = nonemb_cmd->va;
2258  sge = nonembedded_sgl(wrb);
2259 
2260  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2261  OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2262  nonemb_cmd);
2263 
2264  status = be_mcc_notify_wait(adapter);
2265 
2266 err:
2267  spin_unlock_bh(&adapter->mcc_lock);
2268  return status;
2269 }
2270 
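/* Queries PHY details (PHY and interface type, supported auto/fixed speeds,
 * misc parameters) and caches them in adapter->phy. The response DMA buffer
 * is allocated and freed here rather than by the caller.
 */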
2271 int be_cmd_get_phy_info(struct be_adapter *adapter)
2272 {
2273  struct be_mcc_wrb *wrb;
2274  struct be_cmd_req_get_phy_info *req;
2275  struct be_dma_mem cmd;
2276  int status;
2277 
2278  spin_lock_bh(&adapter->mcc_lock);
2279 
2280  wrb = wrb_from_mccq(adapter);
2281  if (!wrb) {
2282  status = -EBUSY;
2283  goto err;
2284  }
2285  cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2286  cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2287  &cmd.dma);
2288  if (!cmd.va) {
2289  dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2290  status = -ENOMEM;
2291  goto err;
2292  }
2293 
2294  req = cmd.va;
2295 
2296  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2297  OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2298  wrb, &cmd);
2299 
2300  status = be_mcc_notify_wait(adapter);
2301  if (!status) {
2302  struct be_phy_info *resp_phy_info =
2303  cmd.va + sizeof(struct be_cmd_req_hdr);
2304  adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2305  adapter->phy.interface_type =
2306  le16_to_cpu(resp_phy_info->interface_type);
2307  adapter->phy.auto_speeds_supported =
2308  le16_to_cpu(resp_phy_info->auto_speeds_supported);
2309  adapter->phy.fixed_speeds_supported =
2310  le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2311  adapter->phy.misc_params =
2312  le32_to_cpu(resp_phy_info->misc_params);
2313  }
2314  pci_free_consistent(adapter->pdev, cmd.size,
2315  cmd.va, cmd.dma);
2316 err:
2317  spin_unlock_bh(&adapter->mcc_lock);
2318  return status;
2319 }
2320 
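/* Caps the NIC transmit rate of the function in @domain at @bps via SET_QOS. */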
2321 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2322 {
2323  struct be_mcc_wrb *wrb;
2324  struct be_cmd_req_set_qos *req;
2325  int status;
2326 
2327  spin_lock_bh(&adapter->mcc_lock);
2328 
2329  wrb = wrb_from_mccq(adapter);
2330  if (!wrb) {
2331  status = -EBUSY;
2332  goto err;
2333  }
2334 
2335  req = embedded_payload(wrb);
2336 
2337  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2338  OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2339 
2340  req->hdr.domain = domain;
2341  req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2342  req->max_bps_nic = cpu_to_le32(bps);
2343 
2344  status = be_mcc_notify_wait(adapter);
2345 
2346 err:
2347  spin_unlock_bh(&adapter->mcc_lock);
2348  return status;
2349 }
2350 
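/* Fetches controller attributes over the bootstrap mailbox (not the MCCQ)
 * and records the physical port number in adapter->hba_port_num.
 */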
2351 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2352 {
2353  struct be_mcc_wrb *wrb;
2354  struct be_cmd_req_cntl_attribs *req;
2355  struct be_cmd_resp_cntl_attribs *resp;
2356  int status;
2357  int payload_len = max(sizeof(*req), sizeof(*resp));
2358  struct mgmt_controller_attrib *attribs;
2359  struct be_dma_mem attribs_cmd;
2360 
2361  memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2362  attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2363  attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2364  &attribs_cmd.dma);
2365  if (!attribs_cmd.va) {
2366  dev_err(&adapter->pdev->dev,
2367  "Memory allocation failure\n");
2368  return -ENOMEM;
2369  }
2370 
2371  if (mutex_lock_interruptible(&adapter->mbox_lock))
2372  return -1;
2373 
2374  wrb = wrb_from_mbox(adapter);
2375  if (!wrb) {
2376  status = -EBUSY;
2377  goto err;
2378  }
2379  req = attribs_cmd.va;
2380 
2381  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2382  OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2383  &attribs_cmd);
2384 
2385  status = be_mbox_notify_wait(adapter);
2386  if (!status) {
2387  attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2388  adapter->hba_port_num = attribs->hba_attribs.phy_port;
2389  }
2390 
2391 err:
2392  mutex_unlock(&adapter->mbox_lock);
2393  pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2394  attribs_cmd.dma);
2395  return status;
2396 }
2397 
2398 /* Uses mbox */
2399 int be_cmd_req_native_mode(struct be_adapter *adapter)
2400 {
2401  struct be_mcc_wrb *wrb;
2402  struct be_cmd_req_set_func_cap *req;
2403  int status;
2404 
2405  if (mutex_lock_interruptible(&adapter->mbox_lock))
2406  return -1;
2407 
2408  wrb = wrb_from_mbox(adapter);
2409  if (!wrb) {
2410  status = -EBUSY;
2411  goto err;
2412  }
2413 
2414  req = embedded_payload(wrb);
2415 
2416  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2417  OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2418 
2419  req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2420  CAPABILITY_BE3_NATIVE_ERX_API);
2421  req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2422 
2423  status = be_mbox_notify_wait(adapter);
2424  if (!status) {
2425  struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2426  adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2427  CAPABILITY_BE3_NATIVE_ERX_API;
2428  if (!adapter->be3_native)
2429  dev_warn(&adapter->pdev->dev,
2430  "adapter not in advanced mode\n");
2431  }
2432 err:
2433  mutex_unlock(&adapter->mbox_lock);
2434  return status;
2435 }
2436 
2437 /* Uses synchronous MCCQ */
2438 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2439  bool *pmac_id_active, u32 *pmac_id, u8 domain)
2440 {
2441  struct be_mcc_wrb *wrb;
2442  struct be_cmd_req_get_mac_list *req;
2443  int status;
2444  int mac_count;
2445  struct be_dma_mem get_mac_list_cmd;
2446  int i;
2447 
2448  memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2449  get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2450  get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2451  get_mac_list_cmd.size,
2452  &get_mac_list_cmd.dma);
2453 
2454  if (!get_mac_list_cmd.va) {
2455  dev_err(&adapter->pdev->dev,
2456  "Memory allocation failure during GET_MAC_LIST\n");
2457  return -ENOMEM;
2458  }
2459 
2460  spin_lock_bh(&adapter->mcc_lock);
2461 
2462  wrb = wrb_from_mccq(adapter);
2463  if (!wrb) {
2464  status = -EBUSY;
2465  goto out;
2466  }
2467 
2468  req = get_mac_list_cmd.va;
2469 
2470  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2471  OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
2472  wrb, &get_mac_list_cmd);
2473 
2474  req->hdr.domain = domain;
2475  req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2476  req->perm_override = 1;
2477 
2478  status = be_mcc_notify_wait(adapter);
2479  if (!status) {
2480  struct be_cmd_resp_get_mac_list *resp =
2481  get_mac_list_cmd.va;
2482  mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2483  /* Mac list returned could contain one or more active mac_ids
2484  * or one or more true or pseudo permanent mac addresses.
2485  * If an active mac_id is present, return first active mac_id
2486  * found.
2487  */
2488  for (i = 0; i < mac_count; i++) {
2489  struct get_list_macaddr *mac_entry;
2490  u16 mac_addr_size;
2491  u32 mac_id;
2492 
2493  mac_entry = &resp->macaddr_list[i];
2494  mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2495  /* mac_id is a 32 bit value and mac_addr size
2496  * is 6 bytes
2497  */
2498  if (mac_addr_size == sizeof(u32)) {
2499  *pmac_id_active = true;
2500  mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2501  *pmac_id = le32_to_cpu(mac_id);
2502  goto out;
2503  }
2504  }
2505  /* If no active mac_id found, return first mac addr */
2506  *pmac_id_active = false;
2507  memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2508  ETH_ALEN);
2509  }
2510 
2511 out:
2512  spin_unlock_bh(&adapter->mcc_lock);
2513  pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2514  get_mac_list_cmd.va, get_mac_list_cmd.dma);
2515  return status;
2516 }
2517 
2518 /* Uses synchronous MCCQ */
2519 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2520  u8 mac_count, u32 domain)
2521 {
2522  struct be_mcc_wrb *wrb;
2523  struct be_cmd_req_set_mac_list *req;
2524  int status;
2525  struct be_dma_mem cmd;
2526 
2527  memset(&cmd, 0, sizeof(struct be_dma_mem));
2528  cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2529  cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2530  &cmd.dma, GFP_KERNEL);
2531  if (!cmd.va) {
2532  dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2533  return -ENOMEM;
2534  }
2535 
2536  spin_lock_bh(&adapter->mcc_lock);
2537 
2538  wrb = wrb_from_mccq(adapter);
2539  if (!wrb) {
2540  status = -EBUSY;
2541  goto err;
2542  }
2543 
2544  req = cmd.va;
2545  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2546  OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2547  wrb, &cmd);
2548 
2549  req->hdr.domain = domain;
2550  req->mac_count = mac_count;
2551  if (mac_count)
2552  memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2553 
2554  status = be_mcc_notify_wait(adapter);
2555 
2556 err:
2557  dma_free_coherent(&adapter->pdev->dev, cmd.size,
2558  cmd.va, cmd.dma);
2559  spin_unlock_bh(&adapter->mcc_lock);
2560  return status;
2561 }
2562 
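/* Updates the hypervisor-switch configuration for @intf_id; a non-zero @pvid
 * additionally installs a port VLAN id on the interface.
 */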
2563 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2564  u32 domain, u16 intf_id)
2565 {
2566  struct be_mcc_wrb *wrb;
2567  struct be_cmd_req_set_hsw_config *req;
2568  void *ctxt;
2569  int status;
2570 
2571  spin_lock_bh(&adapter->mcc_lock);
2572 
2573  wrb = wrb_from_mccq(adapter);
2574  if (!wrb) {
2575  status = -EBUSY;
2576  goto err;
2577  }
2578 
2579  req = embedded_payload(wrb);
2580  ctxt = &req->context;
2581 
2582  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2583  OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2584 
2585  req->hdr.domain = domain;
2586  AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2587  if (pvid) {
2588  AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2589  AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2590  }
2591 
2592  be_dws_cpu_to_le(req->context, sizeof(req->context));
2593  status = be_mcc_notify_wait(adapter);
2594 
2595 err:
2596  spin_unlock_bh(&adapter->mcc_lock);
2597  return status;
2598 }
2599 
2600 /* Get Hyper switch config */
2601 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2602  u32 domain, u16 intf_id)
2603 {
2604  struct be_mcc_wrb *wrb;
2605  struct be_cmd_req_get_hsw_config *req;
2606  void *ctxt;
2607  int status;
2608  u16 vid;
2609 
2610  spin_lock_bh(&adapter->mcc_lock);
2611 
2612  wrb = wrb_from_mccq(adapter);
2613  if (!wrb) {
2614  status = -EBUSY;
2615  goto err;
2616  }
2617 
2618  req = embedded_payload(wrb);
2619  ctxt = &req->context;
2620 
2621  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2622  OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2623 
2624  req->hdr.domain = domain;
2625  AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
2626  intf_id);
2627  AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2628  be_dws_cpu_to_le(req->context, sizeof(req->context));
2629 
2630  status = be_mcc_notify_wait(adapter);
2631  if (!status) {
2632  struct be_cmd_resp_get_hsw_config *resp =
2633  embedded_payload(wrb);
2634  be_dws_le_to_cpu(&resp->context,
2635  sizeof(resp->context));
2636  vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2637  pvid, &resp->context);
2638  *pvid = le16_to_cpu(vid);
2639  }
2640 
2641 err:
2642  spin_unlock_bh(&adapter->mcc_lock);
2643  return status;
2644 }
2645 
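/* Queries wake-on-LAN capability with the v1 ACPI_WOL_MAGIC_CONFIG command
 * over the mailbox. Old firmware unaware of v1 can still report success, so
 * the response length is checked against the v1 payload and a short response
 * is treated as an error.
 */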
2646 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2647 {
2648  struct be_mcc_wrb *wrb;
2649  struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2650  int status;
2651  int payload_len = sizeof(*req);
2652  struct be_dma_mem cmd;
2653 
2654  memset(&cmd, 0, sizeof(struct be_dma_mem));
2655  cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2656  cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2657  &cmd.dma);
2658  if (!cmd.va) {
2659  dev_err(&adapter->pdev->dev,
2660  "Memory allocation failure\n");
2661  return -ENOMEM;
2662  }
2663 
2664  if (mutex_lock_interruptible(&adapter->mbox_lock))
2665  return -1;
2666 
2667  wrb = wrb_from_mbox(adapter);
2668  if (!wrb) {
2669  status = -EBUSY;
2670  goto err;
2671  }
2672 
2673  req = cmd.va;
2674 
2675  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2676  OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2677  payload_len, wrb, &cmd);
2678 
2679  req->hdr.version = 1;
2680  req->query_options = BE_GET_WOL_CAP;
2681 
2682  status = be_mbox_notify_wait(adapter);
2683  if (!status) {
2684  struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
2685  resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
2686 
2687  /* the command could succeed misleadingly on old f/w
2688  * which is not aware of the V1 version. fake an error. */
2689  if (resp->hdr.response_length < payload_len) {
2690  status = -1;
2691  goto err;
2692  }
2693  adapter->wol_cap = resp->wol_settings;
2694  }
2695 err:
2696  mutex_unlock(&adapter->mbox_lock);
2697  pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2698  return status;
2699 
2700 }
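/* Reads the firmware's extended FAT capabilities (firmware logging
 * parameters) into the caller's DMA buffer @cmd over the mailbox.
 */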
2701 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2702  struct be_dma_mem *cmd)
2703 {
2704  struct be_mcc_wrb *wrb;
2705  struct be_cmd_req_get_ext_fat_caps *req;
2706  int status;
2707 
2708  if (mutex_lock_interruptible(&adapter->mbox_lock))
2709  return -1;
2710 
2711  wrb = wrb_from_mbox(adapter);
2712  if (!wrb) {
2713  status = -EBUSY;
2714  goto err;
2715  }
2716 
2717  req = cmd->va;
2718  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2719  OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2720  cmd->size, wrb, cmd);
2721  req->parameter_type = cpu_to_le32(1);
2722 
2723  status = be_mbox_notify_wait(adapter);
2724 err:
2725  mutex_unlock(&adapter->mbox_lock);
2726  return status;
2727 }
2728 
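/* Writes the (possibly modified) FAT configuration parameters in @configs
 * back to firmware through the same DMA buffer scheme, this time over the
 * MCCQ.
 */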
2729 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2730  struct be_dma_mem *cmd,
2731  struct be_fat_conf_params *configs)
2732 {
2733  struct be_mcc_wrb *wrb;
2734  struct be_cmd_req_set_ext_fat_caps *req;
2735  int status;
2736 
2737  spin_lock_bh(&adapter->mcc_lock);
2738 
2739  wrb = wrb_from_mccq(adapter);
2740  if (!wrb) {
2741  status = -EBUSY;
2742  goto err;
2743  }
2744 
2745  req = cmd->va;
2746  memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2747  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2748  OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2749  cmd->size, wrb, cmd);
2750 
2751  status = be_mcc_notify_wait(adapter);
2752 err:
2753  spin_unlock_bh(&adapter->mcc_lock);
2754  return status;
2755 }
2756 
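/* Resolves a printable port name. Only Lancer implements GET_PORT_NAME; on
 * other chips, and on any failure, the name is synthesized as the ASCII
 * digit of hba_port_num.
 */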
2757 int be_cmd_get_port_name(struct be_adapter *adapter, u8 *port_name)
2758 {
2759  struct be_mcc_wrb *wrb;
2760  struct be_cmd_req_get_port_name *req;
2761  int status;
2762 
2763  if (!lancer_chip(adapter)) {
2764  *port_name = adapter->hba_port_num + '0';
2765  return 0;
2766  }
2767 
2768  spin_lock_bh(&adapter->mcc_lock);
2769 
2770  wrb = wrb_from_mccq(adapter);
2771  if (!wrb) {
2772  status = -EBUSY;
2773  goto err;
2774  }
2775 
2776  req = embedded_payload(wrb);
2777 
2778  be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2779  OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
2780  NULL);
2781  req->hdr.version = 1;
2782 
2783  status = be_mcc_notify_wait(adapter);
2784  if (!status) {
2785  struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
2786  *port_name = resp->port_name[adapter->hba_port_num];
2787  } else {
2788  *port_name = adapter->hba_port_num + '0';
2789  }
2790 err:
2791  spin_unlock_bh(&adapter->mcc_lock);
2792  return status;
2793 }
2794 
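/* Passthrough for the RoCE driver: the caller's fully formed request in
 * @wrb_payload is copied into an embedded WRB, issued synchronously, and the
 * response header plus response_length bytes are copied back in host byte
 * order.
 */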
2795 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
2796  int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
2797 {
2798  struct be_adapter *adapter = netdev_priv(netdev_handle);
2799  struct be_mcc_wrb *wrb;
2800  struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
2801  struct be_cmd_req_hdr *req;
2802  struct be_cmd_resp_hdr *resp;
2803  int status;
2804 
2805  spin_lock_bh(&adapter->mcc_lock);
2806 
2807  wrb = wrb_from_mccq(adapter);
2808  if (!wrb) {
2809  status = -EBUSY;
2810  goto err;
2811  }
2812  req = embedded_payload(wrb);
2813  resp = embedded_payload(wrb);
2814 
2815  be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
2816  hdr->opcode, wrb_payload_size, wrb, NULL);
2817  memcpy(req, wrb_payload, wrb_payload_size);
2818  be_dws_cpu_to_le(req, wrb_payload_size);
2819 
2820  status = be_mcc_notify_wait(adapter);
2821  if (cmd_status)
2822  *cmd_status = (status & 0xffff);
2823  if (ext_status)
2824  *ext_status = 0;
2825  memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
2826  be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
2827 err:
2828  spin_unlock_bh(&adapter->mcc_lock);
2829  return status;
2830 }