/*
 * be_cmds.c - Linux kernel 3.7.1
 *
 * Mailbox and MCC command helpers for the Emulex be2iscsi driver.
 */
#include <scsi/iscsi_proto.h>

#include "be.h"
#include "be_mgmt.h"
#include "be_main.h"

int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
{
	u32 sreset;
	u8 *pci_reset_offset = 0;
	u8 *pci_online0_offset = 0;
	u8 *pci_online1_offset = 0;
	u32 pconline0 = 0;
	u32 pconline1 = 0;
	u32 i;

	pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
	pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
	pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;

	/* assert the soft reset, then poll (up to 64 * 100 msec) for the
	 * controller to deassert it */
	sreset = readl((void *)pci_reset_offset);
	sreset |= BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	i = 0;
	while (sreset & BE2_SET_RESET) {
		if (i > 64)
			break;
		msleep(100);
		sreset = readl((void *)pci_reset_offset);
		i++;
	}

	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR DRV_NAME " Soft Reset did not deassert\n");
		return -EIO;
	}

	/* bring the MPU IRAM online and issue a second reset, polling
	 * again for it to deassert */
	pconline1 = BE2_MPU_IRAM_ONLINE;
	writel(pconline0, (void *)pci_online0_offset);
	writel(pconline1, (void *)pci_online1_offset);

	sreset = BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	i = 0;
	while (sreset & BE2_SET_RESET) {
		if (i > 64)
			break;
		msleep(1);
		sreset = readl((void *)pci_reset_offset);
		i++;
	}
	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR DRV_NAME
		       " MPU Online Soft Reset did not deassert\n");
		return -EIO;
	}
	return 0;
}

int be_chk_reset_complete(struct beiscsi_hba *phba)
{
	unsigned int num_loop;
	u8 *mpu_sem = 0;
	u32 status;

	num_loop = 1000;
	mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
	msleep(5000);

	while (num_loop) {
		status = readl((void *)mpu_sem);

		if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
			break;
		msleep(60);
		num_loop--;
	}

	if ((status & 0x80000000) || (!num_loop)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : Failed in be_chk_reset_complete "
			    "status = 0x%x\n", status);
		return -EIO;
	}

	return 0;
}

void be_mcc_notify(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}

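/*
 * The doorbell word assembled in be_mcc_notify() above packs the MCC
 * queue id into the low DB_MCCQ_RING_ID_MASK bits and a "number of
 * entries posted" count at DB_MCCQ_NUM_POSTED_SHIFT; this driver always
 * posts a single WRB at a time, hence the constant 1.
 */
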
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
{
	unsigned int tag = 0;

	if (phba->ctrl.mcc_tag_available) {
		tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
		phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
		phba->ctrl.mcc_numtag[tag] = 0;
	}
	if (tag) {
		phba->ctrl.mcc_tag_available--;
		if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
			phba->ctrl.mcc_alloc_index = 0;
		else
			phba->ctrl.mcc_alloc_index++;
	}
	return tag;
}

void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
{
	spin_lock(&ctrl->mbox_lock);
	tag = tag & 0x000000FF;
	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
		ctrl->mcc_free_index = 0;
	else
		ctrl->mcc_free_index++;
	ctrl->mcc_tag_available++;
	spin_unlock(&ctrl->mbox_lock);
}

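/*
 * A minimal sketch of how a caller is expected to drive the MCC tag
 * machinery above. The real callers live outside this file (e.g. in
 * be_mgmt.c); the local variable names here are illustrative only:
 *
 *	unsigned int tag, status, extd_status;
 *	struct be_mcc_wrb *wrb;
 *
 *	spin_lock(&phba->ctrl.mbox_lock);
 *	tag = alloc_mcc_tag(phba);
 *	if (!tag) {
 *		spin_unlock(&phba->ctrl.mbox_lock);
 *		return tag;		(no free tag: fail the request)
 *	}
 *	wrb = wrb_from_mccq(phba);
 *	wrb->tag0 |= tag;
 *	(... prepare the WRB header and command payload ...)
 *	be_mcc_notify(phba);
 *	spin_unlock(&phba->ctrl.mbox_lock);
 *
 *	(sleep until be_mcc_compl_process_isr() sets valid bit 31)
 *	wait_event_interruptible(phba->ctrl.mcc_wait[tag],
 *				 phba->ctrl.mcc_numtag[tag] & 0x80000000);
 *	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
 *	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
 *	free_mcc_tag(&phba->ctrl, tag);
 */
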
bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		  ASYNC_TRAILER_EVENT_CODE_MASK) ==
		  ASYNC_EVENT_CODE_LINK_STATE);
}

static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else
		return false;
}

static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);

	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
						CQE_STATUS_EXTD_MASK;

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : error in cmd completion: status(compl/extd)=%d/%d\n",
			    compl_status, extd_status);

		return -EBUSY;
	}
	return 0;
}

int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
			     struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	unsigned short tag;

	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	/* The ctrl.mcc_numtag[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	tag = (compl->tag0 & 0x000000FF);
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;

	ctrl->mcc_numtag[tag] = 0x80000000;
	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}

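/*
 * Note the split between the two completion paths: be_mcc_compl_process()
 * decodes and logs a status inline, while the _isr variant above defers
 * the decode to the sleeping submitter by packing status, extd_status and
 * the WRB index into mcc_numtag[tag] (layout documented above) and waking
 * mcc_wait[tag].
 */
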
static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
		struct be_async_event_link_state *evt)
{
	switch (evt->port_link_status) {
	case ASYNC_EVENT_LINK_DOWN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
			    "BC_%d : Link Down on Physical Port %d\n",
			    evt->physical_port);

		phba->state |= BE_ADAPTER_LINK_DOWN;
		iscsi_host_for_each_session(phba->shost,
					    be2iscsi_fail_session);
		break;
	case ASYNC_EVENT_LINK_UP:
		phba->state = BE_ADAPTER_UP;
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
			    "BC_%d : Link UP on Physical Port %d\n",
			    evt->physical_port);
		break;
	default:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
			    "BC_%d : Unexpected Async Notification %d on "
			    "Physical Port %d\n",
			    evt->port_link_status,
			    evt->physical_port);
	}
}

static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
			      u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

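/*
 * The CQ doorbell mirrors the MCC doorbell above: queue id in the low
 * bits, the number of consumed entries at DB_CQ_NUM_POPPED_SHIFT, and an
 * optional rearm bit so the controller will raise a fresh event for the
 * next completion on this queue.
 */
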
int beiscsi_process_mcc(struct beiscsi_hba *phba)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	spin_lock_bh(&phba->ctrl.mcc_cq_lock);
	while ((compl = be_mcc_compl_get(phba))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				/* Interpret compl as an async link evt */
				beiscsi_async_link_state_process(phba,
				   (struct be_async_event_link_state *) compl);
			else
				beiscsi_log(phba, KERN_ERR,
					    BEISCSI_LOG_CONFIG |
					    BEISCSI_LOG_MBOX,
					    "BC_%d : Unsupported Async Event, flags"
					    " = 0x%08x\n", compl->flags);

		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(ctrl, compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);

	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
	int i, status;

	for (i = 0; i < mcc_timeout; i++) {
		status = beiscsi_process_mcc(phba);
		if (status)
			return status;

		if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : mccq poll timed out\n");

		return -EBUSY;
	}
	return 0;
}

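/*
 * The poll above busy-waits in 100 usec steps for at most mcc_timeout
 * iterations (mcc_timeout is defined in be_cmds.h), so the worst-case
 * budget is roughly mcc_timeout * 100 usec before -EBUSY is returned.
 */
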
/* Notify MCC requests and wait for completion */
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
	be_mcc_notify(phba);
	return be_mcc_wait_compl(phba);
}

static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define long_delay 2000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	int cnt = 0, wait = 5;	/* in usecs */
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 12000000) {
			struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
				    "BC_%d : mbox_db poll timed out\n");

			return -EBUSY;
		}

		if (cnt > 50) {
			wait = long_delay;
			mdelay(long_delay / 1000);
		} else
			udelay(wait);
		cnt += wait;
	} while (true);
	return 0;
}

int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);

	/* step 1: write the upper bits of the mailbox dma address */
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : be_mbox_db_ready_wait failed\n");

		return status;
	}
	/* step 2: write the lower bits of the mailbox dma address */
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : be_mbox_db_ready_wait failed\n");

		return status;
	}
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status) {
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
				    "BC_%d : After be_mcc_compl_process\n");

			return status;
		}
	} else {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : Invalid Mailbox Completion\n");

		return -EBUSY;
	}
	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
{
	int status;
	u32 val = 0;
	void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : invalid mailbox completion\n");

		return -EBUSY;
	}
	return 0;
}

void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
			bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
						MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->timeout = 120;
}

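/*
 * Most of the queue create/destroy commands below follow the same
 * embedded-WRB recipe built from the two helpers above -- shown here as
 * a hedged skeleton (SOME_SUBSYSTEM, SOME_OPCODE and struct some_req are
 * placeholders, not real identifiers):
 *
 *	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 *	struct some_req *req = embedded_payload(wrb);
 *
 *	spin_lock(&ctrl->mbox_lock);
 *	memset(wrb, 0, sizeof(*wrb));
 *	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 *	be_cmd_hdr_prepare(&req->hdr, SOME_SUBSYSTEM, SOME_OPCODE,
 *			   sizeof(*req));
 *	(... fill in the command-specific fields of *req ...)
 *	status = be_mbox_notify(ctrl);
 *	(... on success, read the response from the same embedded buffer ...)
 *	spin_unlock(&ctrl->mbox_lock);
 */
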
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}

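/*
 * Worked example for eq_delay_to_mult(): usec_delay = 100 gives
 * interrupt_rate = 1000000 / 100 = 10000, so
 * multiplier = (651042 - 10000) * 10 / 10000 = 641, which rounds to
 * (641 + 5) / 10 = 64. A delay of 0 disables coalescing (multiplier 0).
 */
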
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb;

	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = (mccq->head & 0x000000FF) << 16;
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	return wrb;
}

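/*
 * wrb_from_mccq() stashes the WRB index (the queue head at allocation
 * time) in bits 23:16 of tag0; be_mcc_compl_process_isr() copies those
 * same bits back out of the completion into mcc_numtag[tag].
 */
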
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	u8 *endian_check;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	/* no regular command here: the raw byte pattern below serves as
	 * the FW initialization / endianness signature */
	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x12;
	*endian_check++ = 0x34;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x56;
	*endian_check++ = 0x78;
	*endian_check++ = 0xFF;
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : be_cmd_fw_initialize Failed\n");

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
		      __ilog2_u32(cq->len / 256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	} else
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : In be_cmd_cq_create, status=0x%08x\n",
			    status);

	spin_unlock(&ctrl->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

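/*
 * Worked examples for be_encoded_q_len(): q_len = 256 gives fls(256) = 9;
 * the maximum, q_len = 32768, gives fls() = 16, which is encoded as 0.
 * Queue lengths are expected to be powers of two.
 */
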
static int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
				   struct be_queue_info *mccq,
				   struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	spin_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(phba);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&phba->ctrl.mbox_lock);

	return status;
}

int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 subsys = 0, opcode = 0;
	int status;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BC_%d : In beiscsi_cmd_q_destroy "
		    "queue_type : %d\n", queue_type);

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		spin_unlock(&ctrl->mbox_lock);
		BUG();
		return -ENXIO;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

static int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
					   struct be_queue_info *cq,
					   struct be_queue_info *dq, int length,
					   int entry_size)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
		      1);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
		      be_encoded_q_len(length / sizeof(struct phys_addr)));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
		      ctxt, entry_size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
		      cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	/* num_pages == 0xff is a sentinel passed through to the FW in
	 * req->num_pages; treat it as a single iteration here */
	if (num_pages == 0xff)
		num_pages = 1;

	spin_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		/* each command carries only as many page addresses as fit
		 * in the request's pages[] array */
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BC_%d : FW CMD to map iscsi frags failed.\n");

			goto error;
		}
	} while (num_pages > 0);
error:
	spin_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}

int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	req = embedded_payload(wrb);
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
	status = be_mbox_notify_wait(phba);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

/**
 * be_cmd_set_vlan() - Set the VLAN tag
 * @phba: device priv structure instance
 * @vlan_tag: TAG to be set
 *
 * Set the VLAN tag for the adapter.
 *
 * returns the MCC TAG assigned to the command
 **/
int be_cmd_set_vlan(struct beiscsi_hba *phba,
		    uint16_t vlan_tag)
{
	unsigned int tag = 0;
	struct be_mcc_wrb *wrb;
	struct be_cmd_set_vlan_req *req;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	spin_lock(&ctrl->mbox_lock);
	tag = alloc_mcc_tag(phba);
	if (!tag) {
		spin_unlock(&ctrl->mbox_lock);
		return tag;
	}

	wrb = wrb_from_mccq(phba);
	req = embedded_payload(wrb);
	wrb->tag0 |= tag;
	be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
			   sizeof(*req));

	req->interface_hndl = phba->interface_handle;
	req->vlan_priority = vlan_tag;

	be_mcc_notify(phba);
	spin_unlock(&ctrl->mbox_lock);

	return tag;
}
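
/*
 * be_cmd_set_vlan() is asynchronous: it only posts the WRB and returns
 * the MCC tag (0 on allocation failure). The caller is expected to wait
 * for and decode the completion using the tag pattern sketched after
 * free_mcc_tag() above, then release the tag.
 */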