Linux Kernel 3.7.1
fnic_scsi.c
1 /*
2  * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3  * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4  *
5  * This program is free software; you may redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; version 2 of the License.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16  * SOFTWARE.
17  */
18 #include <linux/mempool.h>
19 #include <linux/errno.h>
20 #include <linux/init.h>
21 #include <linux/workqueue.h>
22 #include <linux/pci.h>
23 #include <linux/scatterlist.h>
24 #include <linux/skbuff.h>
25 #include <linux/spinlock.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/delay.h>
29 #include <linux/gfp.h>
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_tcq.h>
35 #include <scsi/fc/fc_els.h>
36 #include <scsi/fc/fc_fcoe.h>
37 #include <scsi/libfc.h>
38 #include <scsi/fc_frame.h>
39 #include "fnic_io.h"
40 #include "fnic.h"
41 
42 const char *fnic_state_str[] = {
43  [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
44  [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
45  [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
46  [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
47 };
48 
49 static const char *fnic_ioreq_state_str[] = {
50  [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
51  [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
52  [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
53  [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
54 };
55 
56 static const char *fcpio_status_str[] = {
57  [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
58  [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
59  [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
60  [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
61  [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
62  [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
63  [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
64  [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
65  [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
66  [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
67  [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
68  [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
69  [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
70  [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
71  [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
72  [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
73  [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
74  [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
75  [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
76 };
77 
78 const char *fnic_state_to_str(unsigned int state)
79 {
80  if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
81  return "unknown";
82 
83  return fnic_state_str[state];
84 }
85 
86 static const char *fnic_ioreq_state_to_str(unsigned int state)
87 {
88  if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
89  !fnic_ioreq_state_str[state])
90  return "unknown";
91 
92  return fnic_ioreq_state_str[state];
93 }
94 
95 static const char *fnic_fcpio_status_to_str(unsigned int status)
96 {
97  if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
98  return "unknown";
99 
100  return fcpio_status_str[status];
101 }
102 
103 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
104 
105 static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
106  struct scsi_cmnd *sc)
107 {
108  u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
109 
110  return &fnic->io_req_lock[hash];
111 }
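
As an aside, the lock selection above is a power-of-two hash on the block-layer tag, so FNIC_IO_LOCKS must be a power of two for the mask to stay in range. A standalone sketch of the same bucketing, with made-up tag values:

    #include <stdio.h>

    #define NUM_IO_LOCKS 64        /* stand-in for FNIC_IO_LOCKS */

    /* Same bucketing as fnic_io_lock_hash(): tag & (nlocks - 1).
     * Only valid when NUM_IO_LOCKS is a power of two. */
    static unsigned int io_lock_bucket(unsigned int tag)
    {
        return tag & (NUM_IO_LOCKS - 1);
    }

    int main(void)
    {
        /* tags 3, 67 and 131 all share bucket 3 */
        printf("%u %u %u\n", io_lock_bucket(3), io_lock_bucket(67),
               io_lock_bucket(131));
        return 0;
    }
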
112 
113 /*
114  * Unmap the data buffer and sense buffer for an io_req,
115  * also unmap and free the device-private scatter/gather list.
116  */
117 static void fnic_release_ioreq_buf(struct fnic *fnic,
118  struct fnic_io_req *io_req,
119  struct scsi_cmnd *sc)
120 {
121  if (io_req->sgl_list_pa)
122  pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
123  sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
124  PCI_DMA_TODEVICE);
125  scsi_dma_unmap(sc);
126 
127  if (io_req->sgl_cnt)
128  mempool_free(io_req->sgl_list_alloc,
129  fnic->io_sgl_pool[io_req->sgl_type]);
130  if (io_req->sense_buf_pa)
131  pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
132  SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
133 }
134 
135 /* Free up Copy Wq descriptors. Called with copy_wq lock held */
136 static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
137 {
138  /* if no Ack received from firmware, then nothing to clean */
139  if (!fnic->fw_ack_recd[0])
140  return 1;
141 
142  /*
143  * Update desc_available count based on number of freed descriptors
144  * Account for wraparound
145  */
146  if (wq->to_clean_index <= fnic->fw_ack_index[0])
147  wq->ring.desc_avail += (fnic->fw_ack_index[0]
148  - wq->to_clean_index + 1);
149  else
150  wq->ring.desc_avail += (wq->ring.desc_count
151  - wq->to_clean_index
152  + fnic->fw_ack_index[0] + 1);
153 
154  /*
155  * just bump clean index to ack_index+1 accounting for wraparound
156  * this will essentially free up all descriptors between
157  * to_clean_index and fw_ack_index, both inclusive
158  */
159  wq->to_clean_index =
160  (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
161 
162  /* we have processed the acks received so far */
163  fnic->fw_ack_recd[0] = 0;
164  return 0;
165 }
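
The wraparound accounting above is easy to sanity-check in isolation. The sketch below mirrors the two branches of free_wq_copy_descs(); the ring size and indices are invented for the example:

    #include <stdio.h>

    /* Descriptors freed when firmware has acked up to ack_index,
     * mirroring the two branches in free_wq_copy_descs(). */
    static unsigned int descs_freed(unsigned int to_clean,
                                    unsigned int ack_index,
                                    unsigned int desc_count)
    {
        if (to_clean <= ack_index)
            return ack_index - to_clean + 1;
        return desc_count - to_clean + ack_index + 1;
    }

    int main(void)
    {
        printf("%u\n", descs_freed(5, 8, 16));   /* no wrap: 4 */
        printf("%u\n", descs_freed(14, 1, 16));  /* wraps: 14,15,0,1 -> 4 */
        return 0;
    }
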
166 
167 
168 /*
169  * fnic_fw_reset_handler
170  * Routine to send reset msg to fw
171  */
172 int fnic_fw_reset_handler(struct fnic *fnic)
173 {
174  struct vnic_wq_copy *wq = &fnic->wq_copy[0];
175  int ret = 0;
176  unsigned long flags;
177 
179  skb_queue_purge(&fnic->tx_queue);
180 
181  spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
182 
183  if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
184  free_wq_copy_descs(fnic, wq);
185 
186  if (!vnic_wq_copy_desc_avail(wq))
187  ret = -EAGAIN;
188  else
189  fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
190 
191  spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
192 
193  if (!ret)
194  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
195  "Issued fw reset\n");
196  else
197  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
198  "Failed to issue fw reset\n");
199  return ret;
200 }
201 
202 
203 /*
204  * fnic_flogi_reg_handler
205  * Routine to send flogi register msg to fw
206  */
207 int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
208 {
209  struct vnic_wq_copy *wq = &fnic->wq_copy[0];
210  enum fcpio_flogi_reg_format_type format;
211  struct fc_lport *lp = fnic->lport;
212  u8 gw_mac[ETH_ALEN];
213  int ret = 0;
214  unsigned long flags;
215 
216  spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
217 
218  if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
219  free_wq_copy_descs(fnic, wq);
220 
221  if (!vnic_wq_copy_desc_avail(wq)) {
222  ret = -EAGAIN;
223  goto flogi_reg_ioreq_end;
224  }
225 
226  if (fnic->ctlr.map_dest) {
227  memset(gw_mac, 0xff, ETH_ALEN);
228  format = FCPIO_FLOGI_REG_DEF_DEST;
229  } else {
230  memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
231  format = FCPIO_FLOGI_REG_GW_DEST;
232  }
233 
234  if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
235  fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
236  fc_id, gw_mac,
237  fnic->data_src_addr,
238  lp->r_a_tov, lp->e_d_tov);
239  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
240  "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
241  fc_id, fnic->data_src_addr, gw_mac);
242  } else {
243  fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
244  format, fc_id, gw_mac);
245  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
246  "FLOGI reg issued fcid %x map %d dest %pM\n",
247  fc_id, fnic->ctlr.map_dest, gw_mac);
248  }
249 
250 flogi_reg_ioreq_end:
251  spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
252  return ret;
253 }
254 
255 /*
256  * fnic_queue_wq_copy_desc
257  * Routine to enqueue a wq copy desc
258  */
259 static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
260  struct vnic_wq_copy *wq,
261  struct fnic_io_req *io_req,
262  struct scsi_cmnd *sc,
263  int sg_count)
264 {
265  struct scatterlist *sg;
266  struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
267  struct fc_rport_libfc_priv *rp = rport->dd_data;
268  struct host_sg_desc *desc;
269  u8 pri_tag = 0;
270  unsigned int i;
271  unsigned long intr_flags;
272  int flags;
273  u8 exch_flags;
274  struct scsi_lun fc_lun;
275  char msg[2];
276 
277  if (sg_count) {
278  /* For each SGE, create a device desc entry */
279  desc = io_req->sgl_list;
280  for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
281  desc->addr = cpu_to_le64(sg_dma_address(sg));
282  desc->len = cpu_to_le32(sg_dma_len(sg));
283  desc->_resvd = 0;
284  desc++;
285  }
286 
287  io_req->sgl_list_pa = pci_map_single
288  (fnic->pdev,
289  io_req->sgl_list,
290  sizeof(io_req->sgl_list[0]) * sg_count,
291  PCI_DMA_TODEVICE);
292  }
293 
294  io_req->sense_buf_pa = pci_map_single(fnic->pdev,
295  sc->sense_buffer,
296  SCSI_SENSE_BUFFERSIZE,
297  PCI_DMA_FROMDEVICE);
298 
299  int_to_scsilun(sc->device->lun, &fc_lun);
300 
301  pri_tag = FCPIO_ICMND_PTA_SIMPLE;
302  msg[0] = MSG_SIMPLE_TAG;
303  scsi_populate_tag_msg(sc, msg);
304  if (msg[0] == MSG_ORDERED_TAG)
305  pri_tag = FCPIO_ICMND_PTA_ORDERED;
306 
307  /* Enqueue the descriptor in the Copy WQ */
308  spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
309 
310  if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
311  free_wq_copy_descs(fnic, wq);
312 
313  if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
314  spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
315  return SCSI_MLQUEUE_HOST_BUSY;
316  }
317 
318  flags = 0;
319  if (sc->sc_data_direction == DMA_FROM_DEVICE)
320  flags = FCPIO_ICMND_RDDATA;
321  else if (sc->sc_data_direction == DMA_TO_DEVICE)
322  flags = FCPIO_ICMND_WRDATA;
323 
324  exch_flags = 0;
325  if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
326  (rp->flags & FC_RP_FLAGS_RETRY))
327  exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
328 
329  fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
330  0, exch_flags, io_req->sgl_cnt,
331  SCSI_SENSE_BUFFERSIZE,
332  io_req->sgl_list_pa,
333  io_req->sense_buf_pa,
334  0, /* scsi cmd ref, always 0 */
335  pri_tag, /* scsi pri and tag */
336  flags, /* command flags */
337  sc->cmnd, sc->cmd_len,
338  scsi_bufflen(sc),
339  fc_lun.scsi_lun, io_req->port_id,
340  rport->maxframe_size, rp->r_a_tov,
341  rp->e_d_tov);
342 
343  spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
344  return 0;
345 }
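
Each descriptor written in the for_each_sg() loop above is simply a little-endian DMA address/length pair. A userspace sketch of that packing; the struct layout mirrors the three assignments above, and the DMA addresses are fabricated:

    #include <stdint.h>
    #include <stdio.h>

    /* One device SGL entry per scatterlist element; in the driver the
     * stores go through cpu_to_le64()/cpu_to_le32(). */
    struct sg_desc {
        uint64_t addr;
        uint32_t len;
        uint32_t _resvd;
    };

    int main(void)
    {
        struct sg_desc sgl[2];
        uint64_t dma[2] = { 0x10000000, 0x10002000 };  /* fake segments */
        uint32_t len[2] = { 4096, 2048 };

        for (int i = 0; i < 2; i++) {
            sgl[i].addr = dma[i];
            sgl[i].len = len[i];
            sgl[i]._resvd = 0;
        }
        printf("desc0 addr %#llx len %u\n",
               (unsigned long long)sgl[0].addr, sgl[0].len);
        return 0;
    }
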
346 
347 /*
348  * fnic_queuecommand
349  * Routine to send a scsi cdb
350  * Called with host_lock held and interrupts disabled.
351  */
352 static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
353 {
354  struct fc_lport *lp;
355  struct fc_rport *rport;
356  struct fnic_io_req *io_req;
357  struct fnic *fnic;
358  struct vnic_wq_copy *wq;
359  int ret;
360  int sg_count;
361  unsigned long flags;
362  unsigned long ptr;
363 
364  rport = starget_to_rport(scsi_target(sc->device));
365  ret = fc_remote_port_chkready(rport);
366  if (ret) {
367  sc->result = ret;
368  done(sc);
369  return 0;
370  }
371 
372  lp = shost_priv(sc->device->host);
373  if (lp->state != LPORT_ST_READY || !(lp->link_up))
374  return SCSI_MLQUEUE_HOST_BUSY;
375 
376  /*
377  * Release host lock, use driver resource specific locks from here.
378  * Don't re-enable interrupts in case they were disabled prior to the
379  * caller disabling them.
380  */
381  spin_unlock(lp->host->host_lock);
382 
383  /* Get a new io_req for this SCSI IO */
384  fnic = lport_priv(lp);
385 
386  io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
387  if (!io_req) {
388  ret = SCSI_MLQUEUE_HOST_BUSY;
389  goto out;
390  }
391  memset(io_req, 0, sizeof(*io_req));
392 
393  /* Map the data buffer */
394  sg_count = scsi_dma_map(sc);
395  if (sg_count < 0) {
396  mempool_free(io_req, fnic->io_req_pool);
397  goto out;
398  }
399 
400  /* Determine the type of scatter/gather list we need */
401  io_req->sgl_cnt = sg_count;
402  io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
403  if (sg_count > FNIC_DFLT_SG_DESC_CNT)
404  io_req->sgl_type = FNIC_SGL_CACHE_MAX;
405 
406  if (sg_count) {
407  io_req->sgl_list =
408  mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
409  GFP_ATOMIC);
410  if (!io_req->sgl_list) {
411  ret = SCSI_MLQUEUE_HOST_BUSY;
412  scsi_dma_unmap(sc);
413  mempool_free(io_req, fnic->io_req_pool);
414  goto out;
415  }
416 
417  /* Cache sgl list allocated address before alignment */
418  io_req->sgl_list_alloc = io_req->sgl_list;
419  ptr = (unsigned long) io_req->sgl_list;
420  if (ptr % FNIC_SG_DESC_ALIGN) {
421  io_req->sgl_list = (struct host_sg_desc *)
422  (((unsigned long) ptr
423  + FNIC_SG_DESC_ALIGN - 1)
424  & ~(FNIC_SG_DESC_ALIGN - 1));
425  }
426  }
427 
428  /* initialize rest of io_req */
429  io_req->port_id = rport->port_id;
430  CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
431  CMD_SP(sc) = (char *)io_req;
432  sc->scsi_done = done;
433 
434  /* create copy wq desc and enqueue it */
435  wq = &fnic->wq_copy[0];
436  ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
437  if (ret) {
438  /*
439  * In case another thread cancelled the request,
440  * refetch the pointer under the lock.
441  */
442  spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
443 
444  spin_lock_irqsave(io_lock, flags);
445  io_req = (struct fnic_io_req *)CMD_SP(sc);
446  CMD_SP(sc) = NULL;
447  CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
448  spin_unlock_irqrestore(io_lock, flags);
449  if (io_req) {
450  fnic_release_ioreq_buf(fnic, io_req, sc);
451  mempool_free(io_req, fnic->io_req_pool);
452  }
453  }
454 out:
455  /* acquire host lock before returning to SCSI */
456  spin_lock(lp->host->host_lock);
457  return ret;
458 }
459 
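
The sgl_list alignment fixup inside fnic_queuecommand_lck() above is the usual round-up-to-a-power-of-two idiom. A standalone version, with a made-up alignment standing in for FNIC_SG_DESC_ALIGN:

    #include <stdio.h>

    /* Round p up to the next multiple of power-of-two alignment a,
     * exactly as the sgl_list pointer is aligned above. */
    #define ALIGN_UP(p, a)  (((p) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long align = 16;  /* stand-in for FNIC_SG_DESC_ALIGN */

        printf("%#lx\n", ALIGN_UP(0x1001UL, align));  /* -> 0x1010 */
        printf("%#lx\n", ALIGN_UP(0x1010UL, align));  /* unchanged */
        return 0;
    }
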
460 DEF_SCSI_QCMD(fnic_queuecommand)
461 
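DEF_SCSI_QCMD generates the wrapper that the SCSI midlayer actually calls. In 3.7-era kernels the macro expands to roughly the following (paraphrased from include/scsi/scsi_host.h): it takes the host lock, assigns a serial number, and forwards to the _lck variant:

    int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
    {
        unsigned long irq_flags;
        int rc;

        spin_lock_irqsave(shost->host_lock, irq_flags);
        scsi_cmd_get_serial(shost, cmd);
        rc = fnic_queuecommand_lck(cmd, cmd->scsi_done);
        spin_unlock_irqrestore(shost->host_lock, irq_flags);
        return rc;
    }
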
462 /*
463  * fnic_fcpio_fw_reset_cmpl_handler
464  * Routine to handle fw reset completion
465  */
466 static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
467  struct fcpio_fw_req *desc)
468 {
469  u8 type;
470  u8 hdr_status;
471  struct fcpio_tag tag;
472  int ret = 0;
473  unsigned long flags;
474 
475  fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
476 
477  /* Clean up all outstanding io requests */
478  fnic_cleanup_io(fnic, SCSI_NO_TAG);
479 
480  spin_lock_irqsave(&fnic->fnic_lock, flags);
481 
482  /* fnic should be in FC_TRANS_ETH_MODE */
483  if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
484  /* Check status of reset completion */
485  if (!hdr_status) {
486  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
487  "reset cmpl success\n");
488  /* Ready to send flogi out */
489  fnic->state = FNIC_IN_ETH_MODE;
490  } else {
491  FNIC_SCSI_DBG(KERN_DEBUG,
492  fnic->lport->host,
493  "fnic fw_reset : failed %s\n",
494  fnic_fcpio_status_to_str(hdr_status));
495 
496  /*
497  * Unable to change to eth mode, cannot send out flogi
498  * Change state to fc mode, so that subsequent Flogi
499  * requests from libFC will cause more attempts to
500  * reset the firmware. Free the cached flogi
501  */
502  fnic->state = FNIC_IN_FC_MODE;
503  ret = -1;
504  }
505  } else {
506  FNIC_SCSI_DBG(KERN_DEBUG,
507  fnic->lport->host,
508  "Unexpected state %s while processing"
509  " reset cmpl\n", fnic_state_to_str(fnic->state));
510  ret = -1;
511  }
512 
513  /* Thread removing device blocks till firmware reset is complete */
514  if (fnic->remove_wait)
515  complete(fnic->remove_wait);
516 
517  /*
518  * If fnic is being removed, or fw reset failed
519  * free the flogi frame. Else, send it out
520  */
521  if (fnic->remove_wait || ret) {
522  spin_unlock_irqrestore(&fnic->fnic_lock, flags);
523  skb_queue_purge(&fnic->tx_queue);
524  goto reset_cmpl_handler_end;
525  }
526 
527  spin_unlock_irqrestore(&fnic->fnic_lock, flags);
528 
529  fnic_flush_tx(fnic);
530 
531  reset_cmpl_handler_end:
532  return ret;
533 }
534 
535 /*
536  * fnic_fcpio_flogi_reg_cmpl_handler
537  * Routine to handle flogi register completion
538  */
539 static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
540  struct fcpio_fw_req *desc)
541 {
542  u8 type;
543  u8 hdr_status;
544  struct fcpio_tag tag;
545  int ret = 0;
546  unsigned long flags;
547 
548  fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
549 
550  /* Update fnic state based on status of flogi reg completion */
551  spin_lock_irqsave(&fnic->fnic_lock, flags);
552 
553  if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
554 
555  /* Check flogi registration completion status */
556  if (!hdr_status) {
557  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
558  "flog reg succeeded\n");
559  fnic->state = FNIC_IN_FC_MODE;
560  } else {
561  FNIC_SCSI_DBG(KERN_DEBUG,
562  fnic->lport->host,
563  "fnic flogi reg: failed %s\n",
564  fnic_fcpio_status_to_str(hdr_status));
565  fnic->state = FNIC_IN_ETH_MODE;
566  ret = -1;
567  }
568  } else {
569  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
570  "Unexpected fnic state %s while"
571  " processing flogi reg completion\n",
572  fnic_state_to_str(fnic->state));
573  ret = -1;
574  }
575 
576  if (!ret) {
577  if (fnic->stop_rx_link_events) {
578  spin_unlock_irqrestore(&fnic->fnic_lock, flags);
579  goto reg_cmpl_handler_end;
580  }
581  spin_unlock_irqrestore(&fnic->fnic_lock, flags);
582 
583  fnic_flush_tx(fnic);
584  queue_work(fnic_event_queue, &fnic->frame_work);
585  } else {
586  spin_unlock_irqrestore(&fnic->fnic_lock, flags);
587  }
588 
589 reg_cmpl_handler_end:
590  return ret;
591 }
592 
593 static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
594  u16 request_out)
595 {
596  if (wq->to_clean_index <= wq->to_use_index) {
597  /* out of range, stale request_out index */
598  if (request_out < wq->to_clean_index ||
599  request_out >= wq->to_use_index)
600  return 0;
601  } else {
602  /* out of range, stale request_out index */
603  if (request_out < wq->to_clean_index &&
604  request_out >= wq->to_use_index)
605  return 0;
606  }
607  /* request_out index is in range */
608  return 1;
609 }
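
The two cases above amount to a membership test in the half-open window [to_clean_index, to_use_index), which may wrap around the ring. A standalone version with invented indices:

    #include <stdio.h>

    /* Same validity test as is_ack_index_in_range(): request_out must
     * lie in [to_clean, to_use), a window that may wrap. */
    static int index_in_range(unsigned int to_clean, unsigned int to_use,
                              unsigned int request_out)
    {
        if (to_clean <= to_use)
            return request_out >= to_clean && request_out < to_use;
        return request_out >= to_clean || request_out < to_use;
    }

    int main(void)
    {
        printf("%d\n", index_in_range(2, 10, 5));   /* 1: inside */
        printf("%d\n", index_in_range(10, 2, 11));  /* 1: wrapped window */
        printf("%d\n", index_in_range(10, 2, 5));   /* 0: stale index */
        return 0;
    }
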
610 
611 
612 /*
613  * Mark that an ack was received and store the ack index. If multiple
614  * acks are received before the Tx thread cleans up, the latest value is
615  * used, which is the correct behavior. This state should live in the
616  * copy WQ instead of in the fnic
617  */
618 static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
619  unsigned int cq_index,
620  struct fcpio_fw_req *desc)
621 {
622  struct vnic_wq_copy *wq;
623  u16 request_out = desc->u.ack.request_out;
624  unsigned long flags;
625 
626  /* mark the ack state */
627  wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
628  spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
629 
630  if (is_ack_index_in_range(wq, request_out)) {
631  fnic->fw_ack_index[0] = request_out;
632  fnic->fw_ack_recd[0] = 1;
633  }
634  spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
635 }
636 
637 /*
638  * fnic_fcpio_icmnd_cmpl_handler
639  * Routine to handle icmnd completions
640  */
641 static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
642  struct fcpio_fw_req *desc)
643 {
644  u8 type;
645  u8 hdr_status;
646  struct fcpio_tag tag;
647  u32 id;
648  u64 xfer_len = 0;
649  struct fcpio_icmnd_cmpl *icmnd_cmpl;
650  struct fnic_io_req *io_req;
651  struct scsi_cmnd *sc;
652  unsigned long flags;
653  spinlock_t *io_lock;
654 
655  /* Decode the cmpl description to get the io_req id */
656  fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
657  fcpio_tag_id_dec(&tag, &id);
658 
659  if (id >= FNIC_MAX_IO_REQ)
660  return;
661 
662  sc = scsi_host_find_tag(fnic->lport->host, id);
663  WARN_ON_ONCE(!sc);
664  if (!sc)
665  return;
666 
667  io_lock = fnic_io_lock_hash(fnic, sc);
668  spin_lock_irqsave(io_lock, flags);
669  io_req = (struct fnic_io_req *)CMD_SP(sc);
670  WARN_ON_ONCE(!io_req);
671  if (!io_req) {
672  spin_unlock_irqrestore(io_lock, flags);
673  return;
674  }
675 
676  /* firmware completed the io */
677  io_req->io_completed = 1;
678 
679  /*
680  * if SCSI-ML has already issued abort on this command,
681  * ignore completion of the IO. The abts path will clean it up
682  */
683  if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
684  spin_unlock_irqrestore(io_lock, flags);
685  return;
686  }
687 
688  /* Mark the IO as complete */
689  CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
690 
691  icmnd_cmpl = &desc->u.icmnd_cmpl;
692 
693  switch (hdr_status) {
694  case FCPIO_SUCCESS:
695  sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
696  xfer_len = scsi_bufflen(sc);
697  scsi_set_resid(sc, icmnd_cmpl->residual);
698 
699  if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
700  xfer_len -= icmnd_cmpl->residual;
701 
702  /*
703  * If queue_full, then try to reduce queue depth for all
704  * LUNS on the target. Todo: this should be accompanied
705  * by a periodic queue_depth rampup based on successful
706  * IO completion.
707  */
708  if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
709  struct scsi_device *t_sdev;
710  int qd = 0;
711 
712  shost_for_each_device(t_sdev, sc->device->host) {
713  if (t_sdev->id != sc->device->id)
714  continue;
715 
716  if (t_sdev->queue_depth > 1) {
717  qd = scsi_track_queue_full
718  (t_sdev,
719  t_sdev->queue_depth - 1);
720  if (qd == -1)
721  qd = t_sdev->host->cmd_per_lun;
722  shost_printk(KERN_INFO,
723  fnic->lport->host,
724  "scsi[%d:%d:%d:%d"
725  "] queue full detected,"
726  "new depth = %d\n",
727  t_sdev->host->host_no,
728  t_sdev->channel,
729  t_sdev->id, t_sdev->lun,
730  t_sdev->queue_depth);
731  }
732  }
733  }
734  break;
735 
736  case FCPIO_TIMEOUT: /* request was timed out */
737  sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
738  break;
739 
740  case FCPIO_ABORTED: /* request was aborted */
741  sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
742  break;
743 
744  case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
745  scsi_set_resid(sc, icmnd_cmpl->residual);
746  sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
747  break;
748 
749  case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
750  sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
751  break;
752  case FCPIO_INVALID_HEADER: /* header contains invalid data */
753  case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
754  case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
755  case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
756  case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
757  case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
758  case FCPIO_FW_ERR: /* request was terminated due fw error */
759  default:
760  shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
761  fnic_fcpio_status_to_str(hdr_status));
762  sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
763  break;
764  }
765 
766  /* Break link with the SCSI command */
767  CMD_SP(sc) = NULL;
768 
769  spin_unlock_irqrestore(io_lock, flags);
770 
771  fnic_release_ioreq_buf(fnic, io_req, sc);
772 
773  mempool_free(io_req, fnic->io_req_pool);
774 
775  if (sc->sc_data_direction == DMA_FROM_DEVICE) {
776  fnic->lport->host_stats.fcp_input_requests++;
777  fnic->fcp_input_bytes += xfer_len;
778  } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
779  fnic->lport->host_stats.fcp_output_requests++;
780  fnic->fcp_output_bytes += xfer_len;
781  } else
782  fnic->lport->host_stats.fcp_control_requests++;
783 
784  /* Call SCSI completion function to complete the IO */
785  if (sc->scsi_done)
786  sc->scsi_done(sc);
787 
788 }
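
Two bits of arithmetic from the handler above, shown in isolation: sc->result packs the host byte into bits 16-23 with the SCSI status in the low byte, and on an underrun the firmware's residual is subtracted from the request length. Values here are illustrative:

    #include <stdio.h>

    #define DID_OK     0x00  /* standard SCSI host-byte codes */
    #define DID_ERROR  0x07

    /* host byte << 16 | scsi status, as composed in the switch above */
    static unsigned int scsi_result(unsigned int host_byte,
                                    unsigned int scsi_status)
    {
        return (host_byte << 16) | scsi_status;
    }

    /* FCPIO_SUCCESS path: subtract the residual only on RESID_UNDER */
    static unsigned long bytes_moved(unsigned long bufflen,
                                     unsigned long residual, int resid_under)
    {
        return resid_under ? bufflen - residual : bufflen;
    }

    int main(void)
    {
        printf("%#x\n", scsi_result(DID_OK, 0));     /* 0: clean IO */
        printf("%#x\n", scsi_result(DID_ERROR, 2));  /* 0x70002 */
        printf("%lu\n", bytes_moved(4096, 512, 1));  /* 3584 bytes */
        return 0;
    }
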
789 
790 /* fnic_fcpio_itmf_cmpl_handler
791  * Routine to handle itmf completions
792  */
793 static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
794  struct fcpio_fw_req *desc)
795 {
796  u8 type;
797  u8 hdr_status;
798  struct fcpio_tag tag;
799  u32 id;
800  struct scsi_cmnd *sc;
801  struct fnic_io_req *io_req;
802  unsigned long flags;
803  spinlock_t *io_lock;
804 
805  fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
806  fcpio_tag_id_dec(&tag, &id);
807 
808  if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
809  return;
810 
811  sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
812  WARN_ON_ONCE(!sc);
813  if (!sc)
814  return;
815 
816  io_lock = fnic_io_lock_hash(fnic, sc);
817  spin_lock_irqsave(io_lock, flags);
818  io_req = (struct fnic_io_req *)CMD_SP(sc);
819  WARN_ON_ONCE(!io_req);
820  if (!io_req) {
821  spin_unlock_irqrestore(io_lock, flags);
822  return;
823  }
824 
825  if (id & FNIC_TAG_ABORT) {
826  /* Completion of abort cmd */
827  if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
828  /* This is a late completion. Ignore it */
829  spin_unlock_irqrestore(io_lock, flags);
830  return;
831  }
832  CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
833  CMD_ABTS_STATUS(sc) = hdr_status;
834 
835  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
836  "abts cmpl recd. id %d status %s\n",
837  (int)(id & FNIC_TAG_MASK),
838  fnic_fcpio_status_to_str(hdr_status));
839 
840  /*
841  * If scsi_eh thread is blocked waiting for abts to complete,
842  * signal completion to it. IO will be cleaned in the thread
843  * else clean it in this context
844  */
845  if (io_req->abts_done) {
846  complete(io_req->abts_done);
847  spin_unlock_irqrestore(io_lock, flags);
848  } else {
849  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
850  "abts cmpl, completing IO\n");
851  CMD_SP(sc) = NULL;
852  sc->result = (DID_ERROR << 16);
853 
854  spin_unlock_irqrestore(io_lock, flags);
855 
856  fnic_release_ioreq_buf(fnic, io_req, sc);
857  mempool_free(io_req, fnic->io_req_pool);
858  if (sc->scsi_done)
859  sc->scsi_done(sc);
860  }
861 
862  } else if (id & FNIC_TAG_DEV_RST) {
863  /* Completion of device reset */
864  CMD_LR_STATUS(sc) = hdr_status;
865  CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
866  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
867  "dev reset cmpl recd. id %d status %s\n",
868  (int)(id & FNIC_TAG_MASK),
869  fnic_fcpio_status_to_str(hdr_status));
870  if (io_req->dr_done)
871  complete(io_req->dr_done);
872  spin_unlock_irqrestore(io_lock, flags);
873 
874  } else {
875  shost_printk(KERN_ERR, fnic->lport->host,
876  "Unexpected itmf io state %s tag %x\n",
877  fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
878  spin_unlock_irqrestore(io_lock, flags);
879  }
880 
881 }
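
The branches above key off op-type flag bits ORed into the high end of the tag, with FNIC_TAG_MASK recovering the SCSI tag in the low bits. The exact bit positions live in fnic.h; the values below are assumed for illustration only:

    #include <stdio.h>

    #define BIT(n)           (1u << (n))
    /* illustrative stand-ins for FNIC_TAG_ABORT/DEV_RST/MASK */
    #define EX_TAG_ABORT     BIT(30)
    #define EX_TAG_DEV_RST   BIT(29)
    #define EX_TAG_MASK      (BIT(24) - 1)

    int main(void)
    {
        unsigned int id = 17 | EX_TAG_ABORT;  /* abort for tag 17 */

        printf("tag %u abort=%d devrst=%d\n",
               id & EX_TAG_MASK,
               !!(id & EX_TAG_ABORT),
               !!(id & EX_TAG_DEV_RST));
        return 0;
    }
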
882 
883 /*
884  * fnic_fcpio_cmpl_handler
885  * Routine to service the cq for wq_copy
886  */
887 static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
888  unsigned int cq_index,
889  struct fcpio_fw_req *desc)
890 {
891  struct fnic *fnic = vnic_dev_priv(vdev);
892  int ret = 0;
893 
894  switch (desc->hdr.type) {
895  case FCPIO_ACK: /* fw copied copy wq desc to its queue */
896  fnic_fcpio_ack_handler(fnic, cq_index, desc);
897  break;
898 
899  case FCPIO_ICMND_CMPL: /* fw completed a command */
900  fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
901  break;
902 
903  case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
904  fnic_fcpio_itmf_cmpl_handler(fnic, desc);
905  break;
906 
907  case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
908  case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
909  ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
910  break;
911 
912  case FCPIO_RESET_CMPL: /* fw completed reset */
913  ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
914  break;
915 
916  default:
917  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
918  "firmware completion type %d\n",
919  desc->hdr.type);
920  break;
921  }
922 
923  return ret;
924 }
925 
926 /*
927  * fnic_wq_copy_cmpl_handler
928  * Routine to process wq copy
929  */
930 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
931 {
932  unsigned int wq_work_done = 0;
933  unsigned int i, cq_index;
934  unsigned int cur_work_done;
935 
936  for (i = 0; i < fnic->wq_copy_count; i++) {
937  cq_index = i + fnic->raw_wq_count + fnic->rq_count;
938  cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
939  fnic_fcpio_cmpl_handler,
940  copy_work_to_do);
941  wq_work_done += cur_work_done;
942  }
943  return wq_work_done;
944 }
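
The cq_index computed here is undone in fnic_fcpio_ack_handler(): copy-WQ completion queues sit after the raw WQs and RQs in the cq[] array. A minimal sketch with invented queue counts:

    #include <stdio.h>

    /* copy-WQ i <-> completion-queue index, as computed in
     * fnic_wq_copy_cmpl_handler() and inverted in the ack handler */
    static unsigned int copy_wq_to_cq(unsigned int i, unsigned int raw_wqs,
                                      unsigned int rqs)
    {
        return i + raw_wqs + rqs;
    }

    int main(void)
    {
        unsigned int raw_wqs = 1, rqs = 1;  /* invented counts */
        unsigned int cq = copy_wq_to_cq(0, raw_wqs, rqs);

        printf("copy wq 0 -> cq %u -> copy wq %u\n",
               cq, cq - raw_wqs - rqs);
        return 0;
    }
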
945 
946 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
947 {
948  unsigned int i;
949  struct fnic_io_req *io_req;
950  unsigned long flags = 0;
951  struct scsi_cmnd *sc;
952  spinlock_t *io_lock;
953 
954  for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
955  if (i == exclude_id)
956  continue;
957 
958  sc = scsi_host_find_tag(fnic->lport->host, i);
959  if (!sc)
960  continue;
961 
962  io_lock = fnic_io_lock_hash(fnic, sc);
963  spin_lock_irqsave(io_lock, flags);
964  io_req = (struct fnic_io_req *)CMD_SP(sc);
965  if (!io_req) {
966  spin_unlock_irqrestore(io_lock, flags);
967  goto cleanup_scsi_cmd;
968  }
969 
970  CMD_SP(sc) = NULL;
971 
972  spin_unlock_irqrestore(io_lock, flags);
973 
974  /*
975  * If there is a scsi_cmnd associated with this io_req, then
976  * free the corresponding state
977  */
978  fnic_release_ioreq_buf(fnic, io_req, sc);
979  mempool_free(io_req, fnic->io_req_pool);
980 
981 cleanup_scsi_cmd:
982  sc->result = DID_TRANSPORT_DISRUPTED << 16;
983  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
984  " DID_TRANSPORT_DISRUPTED\n");
985 
986  /* Complete the command to SCSI */
987  if (sc->scsi_done)
988  sc->scsi_done(sc);
989  }
990 }
991 
992 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
993  struct fcpio_host_req *desc)
994 {
995  u32 id;
996  struct fnic *fnic = vnic_dev_priv(wq->vdev);
997  struct fnic_io_req *io_req;
998  struct scsi_cmnd *sc;
999  unsigned long flags;
1000  spinlock_t *io_lock;
1001 
1002  /* get the tag reference */
1003  fcpio_tag_id_dec(&desc->hdr.tag, &id);
1004  id &= FNIC_TAG_MASK;
1005 
1006  if (id >= FNIC_MAX_IO_REQ)
1007  return;
1008 
1009  sc = scsi_host_find_tag(fnic->lport->host, id);
1010  if (!sc)
1011  return;
1012 
1013  io_lock = fnic_io_lock_hash(fnic, sc);
1014  spin_lock_irqsave(io_lock, flags);
1015 
1016  /* Get the IO context which this desc refers to */
1017  io_req = (struct fnic_io_req *)CMD_SP(sc);
1018 
1019  /* fnic interrupts are turned off by now */
1020 
1021  if (!io_req) {
1022  spin_unlock_irqrestore(io_lock, flags);
1023  goto wq_copy_cleanup_scsi_cmd;
1024  }
1025 
1026  CMD_SP(sc) = NULL;
1027 
1028  spin_unlock_irqrestore(io_lock, flags);
1029 
1030  fnic_release_ioreq_buf(fnic, io_req, sc);
1031  mempool_free(io_req, fnic->io_req_pool);
1032 
1033 wq_copy_cleanup_scsi_cmd:
1034  sc->result = DID_NO_CONNECT << 16;
1035  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
1036  " DID_NO_CONNECT\n");
1037 
1038  if (sc->scsi_done)
1039  sc->scsi_done(sc);
1040 }
1041 
1042 static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1043  u32 task_req, u8 *fc_lun,
1044  struct fnic_io_req *io_req)
1045 {
1046  struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1047  unsigned long flags;
1048 
1049  spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1050 
1051  if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1052  free_wq_copy_descs(fnic, wq);
1053 
1054  if (!vnic_wq_copy_desc_avail(wq)) {
1055  spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1056  return 1;
1057  }
1058  fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1059  0, task_req, tag, fc_lun, io_req->port_id,
1060  fnic->config.ra_tov, fnic->config.ed_tov);
1061 
1062  spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1063  return 0;
1064 }
1065 
1066 void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1067 {
1068  int tag;
1069  struct fnic_io_req *io_req;
1070  spinlock_t *io_lock;
1071  unsigned long flags;
1072  struct scsi_cmnd *sc;
1073  struct scsi_lun fc_lun;
1074  enum fnic_ioreq_state old_ioreq_state;
1075 
1076  FNIC_SCSI_DBG(KERN_DEBUG,
1077  fnic->lport->host,
1078  "fnic_rport_reset_exch called portid 0x%06x\n",
1079  port_id);
1080 
1081  if (fnic->in_remove)
1082  return;
1083 
1084  for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1085  sc = scsi_host_find_tag(fnic->lport->host, tag);
1086  if (!sc)
1087  continue;
1088 
1089  io_lock = fnic_io_lock_hash(fnic, sc);
1090  spin_lock_irqsave(io_lock, flags);
1091 
1092  io_req = (struct fnic_io_req *)CMD_SP(sc);
1093 
1094  if (!io_req || io_req->port_id != port_id) {
1095  spin_unlock_irqrestore(io_lock, flags);
1096  continue;
1097  }
1098 
1099  /*
1100  * Found IO that is still pending with firmware and
1101  * belongs to rport that went away
1102  */
1103  if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1104  spin_unlock_irqrestore(io_lock, flags);
1105  continue;
1106  }
1107  old_ioreq_state = CMD_STATE(sc);
1108  CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1109  CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1110 
1111  BUG_ON(io_req->abts_done);
1112 
1113  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1114  "fnic_rport_reset_exch: Issuing abts\n");
1115 
1116  spin_unlock_irqrestore(io_lock, flags);
1117 
1118  /* Now queue the abort command to firmware */
1119  int_to_scsilun(sc->device->lun, &fc_lun);
1120 
1121  if (fnic_queue_abort_io_req(fnic, tag,
1122  FCPIO_ITMF_ABT_TASK_TERM,
1123  fc_lun.scsi_lun, io_req)) {
1124  /*
1125  * Revert the cmd state back to old state, if
1126  * it hasn't changed in between. This cmd will get
1127  * aborted later by scsi_eh, or cleaned up during
1128  * lun reset
1129  */
1130  io_lock = fnic_io_lock_hash(fnic, sc);
1131 
1132  spin_lock_irqsave(io_lock, flags);
1133  if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1134  CMD_STATE(sc) = old_ioreq_state;
1135  spin_unlock_irqrestore(io_lock, flags);
1136  }
1137  }
1138 
1139 }
1140 
1141 void fnic_terminate_rport_io(struct fc_rport *rport)
1142 {
1143  int tag;
1144  struct fnic_io_req *io_req;
1145  spinlock_t *io_lock;
1146  unsigned long flags;
1147  struct scsi_cmnd *sc;
1148  struct scsi_lun fc_lun;
1149  struct fc_rport_libfc_priv *rdata = rport->dd_data;
1150  struct fc_lport *lport = rdata->local_port;
1151  struct fnic *fnic = lport_priv(lport);
1152  struct fc_rport *cmd_rport;
1153  enum fnic_ioreq_state old_ioreq_state;
1154 
1155  FNIC_SCSI_DBG(KERN_DEBUG,
1156  fnic->lport->host, "fnic_terminate_rport_io called"
1157  " wwpn 0x%llx, wwnn 0x%llx, portid 0x%06x\n",
1158  rport->port_name, rport->node_name,
1159  rport->port_id);
1160 
1161  if (fnic->in_remove)
1162  return;
1163 
1164  for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1165  sc = scsi_host_find_tag(fnic->lport->host, tag);
1166  if (!sc)
1167  continue;
1168 
1169  cmd_rport = starget_to_rport(scsi_target(sc->device));
1170  if (rport != cmd_rport)
1171  continue;
1172 
1173  io_lock = fnic_io_lock_hash(fnic, sc);
1174  spin_lock_irqsave(io_lock, flags);
1175 
1176  io_req = (struct fnic_io_req *)CMD_SP(sc);
1177 
1178  if (!io_req || rport != cmd_rport) {
1179  spin_unlock_irqrestore(io_lock, flags);
1180  continue;
1181  }
1182 
1183  /*
1184  * Found IO that is still pending with firmware and
1185  * belongs to rport that went away
1186  */
1187  if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1188  spin_unlock_irqrestore(io_lock, flags);
1189  continue;
1190  }
1191  old_ioreq_state = CMD_STATE(sc);
1192  CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1193  CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1194 
1195  BUG_ON(io_req->abts_done);
1196 
1197  FNIC_SCSI_DBG(KERN_DEBUG,
1198  fnic->lport->host,
1199  "fnic_terminate_rport_io: Issuing abts\n");
1200 
1201  spin_unlock_irqrestore(io_lock, flags);
1202 
1203  /* Now queue the abort command to firmware */
1204  int_to_scsilun(sc->device->lun, &fc_lun);
1205 
1206  if (fnic_queue_abort_io_req(fnic, tag,
1207  FCPIO_ITMF_ABT_TASK_TERM,
1208  fc_lun.scsi_lun, io_req)) {
1209  /*
1210  * Revert the cmd state back to old state, if
1211  * it hasn't changed in between. This cmd will get
1212  * aborted later by scsi_eh, or cleaned up during
1213  * lun reset
1214  */
1215  io_lock = fnic_io_lock_hash(fnic, sc);
1216 
1217  spin_lock_irqsave(io_lock, flags);
1218  if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1219  CMD_STATE(sc) = old_ioreq_state;
1220  spin_unlock_irqrestore(io_lock, flags);
1221  }
1222  }
1223 
1224 }
1225 
1226 /*
1227  * This function is exported to SCSI for sending abort cmnds.
1228  * A SCSI IO is represented by a io_req in the driver.
1229  * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
1230  */
1231 int fnic_abort_cmd(struct scsi_cmnd *sc)
1232 {
1233  struct fc_lport *lp;
1234  struct fnic *fnic;
1235  struct fnic_io_req *io_req;
1236  struct fc_rport *rport;
1237  spinlock_t *io_lock;
1238  unsigned long flags;
1239  int ret = SUCCESS;
1240  u32 task_req;
1241  struct scsi_lun fc_lun;
1242  DECLARE_COMPLETION_ONSTACK(tm_done);
1243 
1244  /* Wait for rport to unblock */
1245  fc_block_scsi_eh(sc);
1246 
1247  /* Get local-port, check ready and link up */
1248  lp = shost_priv(sc->device->host);
1249 
1250  fnic = lport_priv(lp);
1251  rport = starget_to_rport(scsi_target(sc->device));
1252  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1253  "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
1254  rport->port_id, sc->device->lun, sc->request->tag);
1255 
1256  if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1257  ret = FAILED;
1258  goto fnic_abort_cmd_end;
1259  }
1260 
1261  /*
1262  * Avoid a race between SCSI issuing the abort and the device
1263  * completing the command.
1264  *
1265  * If the command is already completed by the fw cmpl code,
1266  * we just return SUCCESS from here. This means that the abort
1267  * succeeded. In the SCSI ML, since the timeout for the command has
1268  * happened, the completion won't actually complete the command
1269  * and it will be considered an aborted command
1270  *
1271  * The CMD_SP will not be cleared except while holding io_req_lock.
1272  */
1273  io_lock = fnic_io_lock_hash(fnic, sc);
1274  spin_lock_irqsave(io_lock, flags);
1275  io_req = (struct fnic_io_req *)CMD_SP(sc);
1276  if (!io_req) {
1277  spin_unlock_irqrestore(io_lock, flags);
1278  goto fnic_abort_cmd_end;
1279  }
1280 
1281  io_req->abts_done = &tm_done;
1282 
1283  if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1284  spin_unlock_irqrestore(io_lock, flags);
1285  goto wait_pending;
1286  }
1287  /*
1288  * Command is still pending, need to abort it
1289  * If the firmware completes the command after this point,
1290  * the completion won't be done till the mid-layer, since the abort
1291  * has already started.
1292  */
1293  CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1294  CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1295 
1296  spin_unlock_irqrestore(io_lock, flags);
1297 
1298  /*
1299  * Check readiness of the remote port. If the path to remote
1300  * port is up, then send abts to the remote port to terminate
1301  * the IO. Else, just locally terminate the IO in the firmware
1302  */
1303  if (fc_remote_port_chkready(rport) == 0)
1304  task_req = FCPIO_ITMF_ABT_TASK;
1305  else
1306  task_req = FCPIO_ITMF_ABT_TASK_TERM;
1307 
1308  /* Now queue the abort command to firmware */
1309  int_to_scsilun(sc->device->lun, &fc_lun);
1310 
1311  if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
1312  fc_lun.scsi_lun, io_req)) {
1313  spin_lock_irqsave(io_lock, flags);
1314  io_req = (struct fnic_io_req *)CMD_SP(sc);
1315  if (io_req)
1316  io_req->abts_done = NULL;
1317  spin_unlock_irqrestore(io_lock, flags);
1318  ret = FAILED;
1319  goto fnic_abort_cmd_end;
1320  }
1321 
1322  /*
1323  * We queued an abort IO, wait for its completion.
1324  * Once the firmware completes the abort command, it will
1325  * wake up this thread.
1326  */
1327  wait_pending:
1328  wait_for_completion_timeout(&tm_done,
1329  msecs_to_jiffies
1330  (2 * fnic->config.ra_tov +
1331  fnic->config.ed_tov));
1332 
1333  /* Check the abort status */
1334  spin_lock_irqsave(io_lock, flags);
1335 
1336  io_req = (struct fnic_io_req *)CMD_SP(sc);
1337  if (!io_req) {
1338  spin_unlock_irqrestore(io_lock, flags);
1339  ret = FAILED;
1340  goto fnic_abort_cmd_end;
1341  }
1342  io_req->abts_done = NULL;
1343 
1344  /* fw did not complete abort, timed out */
1345  if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1346  spin_unlock_irqrestore(io_lock, flags);
1347  ret = FAILED;
1348  goto fnic_abort_cmd_end;
1349  }
1350 
1351  /*
1352  * firmware completed the abort, check the status,
1353  * free the io_req irrespective of failure or success
1354  */
1355  if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
1356  ret = FAILED;
1357 
1358  CMD_SP(sc) = NULL;
1359 
1360  spin_unlock_irqrestore(io_lock, flags);
1361 
1362  fnic_release_ioreq_buf(fnic, io_req, sc);
1363  mempool_free(io_req, fnic->io_req_pool);
1364 
1365 fnic_abort_cmd_end:
1366  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1367  "Returning from abort cmd %s\n",
1368  (ret == SUCCESS) ?
1369  "SUCCESS" : "FAILED");
1370  return ret;
1371 }
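
The wait after queuing the ABTS is bounded by 2 * R_A_TOV + E_D_TOV milliseconds, taken from fnic->config. A trivial sketch of that budget using the classic Fibre Channel default timer values (10 s and 2 s; the real values come from firmware config):

    #include <stdio.h>

    /* abort wait budget used above: two R_A_TOV plus one E_D_TOV, in ms */
    static unsigned long abort_wait_ms(unsigned long ra_tov_ms,
                                       unsigned long ed_tov_ms)
    {
        return 2 * ra_tov_ms + ed_tov_ms;
    }

    int main(void)
    {
        printf("%lu ms\n", abort_wait_ms(10000, 2000));  /* 22000 ms */
        return 0;
    }
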
1372 
1373 static inline int fnic_queue_dr_io_req(struct fnic *fnic,
1374  struct scsi_cmnd *sc,
1375  struct fnic_io_req *io_req)
1376 {
1377  struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1378  struct scsi_lun fc_lun;
1379  int ret = 0;
1380  unsigned long intr_flags;
1381 
1382  spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
1383 
1384  if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1385  free_wq_copy_descs(fnic, wq);
1386 
1387  if (!vnic_wq_copy_desc_avail(wq)) {
1388  ret = -EAGAIN;
1389  goto lr_io_req_end;
1390  }
1391 
1392  /* fill in the lun info */
1393  int_to_scsilun(sc->device->lun, &fc_lun);
1394 
1395  fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
1396  0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
1397  fc_lun.scsi_lun, io_req->port_id,
1398  fnic->config.ra_tov, fnic->config.ed_tov);
1399 
1400 lr_io_req_end:
1401  spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
1402 
1403  return ret;
1404 }
1405 
1406 /*
1407  * Clean up any pending aborts on the lun
1408  * For each outstanding IO on this lun, whose abort is not completed by fw,
1409  * issue a local abort. Wait for abort to complete. Return 0 if all commands
1410  * successfully aborted, 1 otherwise
1411  */
1412 static int fnic_clean_pending_aborts(struct fnic *fnic,
1413  struct scsi_cmnd *lr_sc)
1414 {
1415  int tag;
1416  struct fnic_io_req *io_req;
1417  spinlock_t *io_lock;
1418  unsigned long flags;
1419  int ret = 0;
1420  struct scsi_cmnd *sc;
1421  struct scsi_lun fc_lun;
1422  struct scsi_device *lun_dev = lr_sc->device;
1423  DECLARE_COMPLETION_ONSTACK(tm_done);
1424 
1425  for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1426  sc = scsi_host_find_tag(fnic->lport->host, tag);
1427  /*
1428  * ignore this lun reset cmd or cmds that do not belong to
1429  * this lun
1430  */
1431  if (!sc || sc == lr_sc || sc->device != lun_dev)
1432  continue;
1433 
1434  io_lock = fnic_io_lock_hash(fnic, sc);
1435  spin_lock_irqsave(io_lock, flags);
1436 
1437  io_req = (struct fnic_io_req *)CMD_SP(sc);
1438 
1439  if (!io_req || sc->device != lun_dev) {
1440  spin_unlock_irqrestore(io_lock, flags);
1441  continue;
1442  }
1443 
1444  /*
1445  * Found IO that is still pending with firmware and
1446  * belongs to the LUN that we are resetting
1447  */
1448  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1449  "Found IO in %s on lun\n",
1450  fnic_ioreq_state_to_str(CMD_STATE(sc)));
1451 
1452  BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_CMD_PENDING);
1453 
1454  CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1455  io_req->abts_done = &tm_done;
1456  spin_unlock_irqrestore(io_lock, flags);
1457 
1458  /* Now queue the abort command to firmware */
1459  int_to_scsilun(sc->device->lun, &fc_lun);
1460 
1461  if (fnic_queue_abort_io_req(fnic, tag,
1462  FCPIO_ITMF_ABT_TASK_TERM,
1463  fc_lun.scsi_lun, io_req)) {
1464  spin_lock_irqsave(io_lock, flags);
1465  io_req = (struct fnic_io_req *)CMD_SP(sc);
1466  if (io_req)
1467  io_req->abts_done = NULL;
1468  spin_unlock_irqrestore(io_lock, flags);
1469  ret = 1;
1470  goto clean_pending_aborts_end;
1471  }
1472 
1473  wait_for_completion_timeout(&tm_done,
1474  msecs_to_jiffies
1475  (fnic->config.ed_tov));
1476 
1477  /* Recheck cmd state to check if it is now aborted */
1478  spin_lock_irqsave(io_lock, flags);
1479  io_req = (struct fnic_io_req *)CMD_SP(sc);
1480  if (!io_req) {
1481  spin_unlock_irqrestore(io_lock, flags);
1482  ret = 1;
1483  goto clean_pending_aborts_end;
1484  }
1485 
1486  io_req->abts_done = NULL;
1487 
1488  /* if abort is still pending with fw, fail */
1489  if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1490  spin_unlock_irqrestore(io_lock, flags);
1491  ret = 1;
1492  goto clean_pending_aborts_end;
1493  }
1494  CMD_SP(sc) = NULL;
1495  spin_unlock_irqrestore(io_lock, flags);
1496 
1497  fnic_release_ioreq_buf(fnic, io_req, sc);
1498  mempool_free(io_req, fnic->io_req_pool);
1499  }
1500 
1501 clean_pending_aborts_end:
1502  return ret;
1503 }
1504 
1505 /*
1506  * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
1507  * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
1508  * on the LUN.
1509  */
1510 int fnic_device_reset(struct scsi_cmnd *sc)
1511 {
1512  struct fc_lport *lp;
1513  struct fnic *fnic;
1514  struct fnic_io_req *io_req;
1515  struct fc_rport *rport;
1516  int status;
1517  int ret = FAILED;
1518  spinlock_t *io_lock;
1519  unsigned long flags;
1520  DECLARE_COMPLETION_ONSTACK(tm_done);
1521 
1522  /* Wait for rport to unblock */
1523  fc_block_scsi_eh(sc);
1524 
1525  /* Get local-port, check ready and link up */
1526  lp = shost_priv(sc->device->host);
1527 
1528  fnic = lport_priv(lp);
1529 
1530  rport = starget_to_rport(scsi_target(sc->device));
1531  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1532  "Device reset called FCID 0x%x, LUN 0x%x\n",
1533  rport->port_id, sc->device->lun);
1534 
1535  if (lp->state != LPORT_ST_READY || !(lp->link_up))
1536  goto fnic_device_reset_end;
1537 
1538  /* Check if remote port up */
1539  if (fc_remote_port_chkready(rport))
1540  goto fnic_device_reset_end;
1541 
1542  io_lock = fnic_io_lock_hash(fnic, sc);
1543  spin_lock_irqsave(io_lock, flags);
1544  io_req = (struct fnic_io_req *)CMD_SP(sc);
1545 
1546  /*
1547  * If there is a io_req attached to this command, then use it,
1548  * else allocate a new one.
1549  */
1550  if (!io_req) {
1551  io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
1552  if (!io_req) {
1553  spin_unlock_irqrestore(io_lock, flags);
1554  goto fnic_device_reset_end;
1555  }
1556  memset(io_req, 0, sizeof(*io_req));
1557  io_req->port_id = rport->port_id;
1558  CMD_SP(sc) = (char *)io_req;
1559  }
1560  io_req->dr_done = &tm_done;
1561  CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
1562  CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
1563  spin_unlock_irqrestore(io_lock, flags);
1564 
1565  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
1566  sc->request->tag);
1567 
1568  /*
1569  * issue the device reset, if enqueue failed, clean up the ioreq
1570  * and break assoc with scsi cmd
1571  */
1572  if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
1573  spin_lock_irqsave(io_lock, flags);
1574  io_req = (struct fnic_io_req *)CMD_SP(sc);
1575  if (io_req)
1576  io_req->dr_done = NULL;
1577  goto fnic_device_reset_clean;
1578  }
1579 
1580  /*
1581  * Wait on the local completion for LUN reset. The io_req may be
1582  * freed while we wait since we hold no lock.
1583  */
1584  wait_for_completion_timeout(&tm_done,
1585  msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
1586 
1587  spin_lock_irqsave(io_lock, flags);
1588  io_req = (struct fnic_io_req *)CMD_SP(sc);
1589  if (!io_req) {
1590  spin_unlock_irqrestore(io_lock, flags);
1591  goto fnic_device_reset_end;
1592  }
1593  io_req->dr_done = NULL;
1594 
1595  status = CMD_LR_STATUS(sc);
1596  spin_unlock_irqrestore(io_lock, flags);
1597 
1598  /*
1599  * If lun reset not completed, bail out with failed. io_req
1600  * gets cleaned up during higher levels of EH
1601  */
1602  if (status == FCPIO_INVALID_CODE) {
1603  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1604  "Device reset timed out\n");
1605  goto fnic_device_reset_end;
1606  }
1607 
1608  /* Completed, but not successful, clean up the io_req, return fail */
1609  if (status != FCPIO_SUCCESS) {
1610  spin_lock_irqsave(io_lock, flags);
1611  FNIC_SCSI_DBG(KERN_DEBUG,
1612  fnic->lport->host,
1613  "Device reset completed - failed\n");
1614  io_req = (struct fnic_io_req *)CMD_SP(sc);
1615  goto fnic_device_reset_clean;
1616  }
1617 
1618  /*
1619  * Clean up any aborts on this lun that have still not
1620  * completed. If any of these fail, then LUN reset fails.
1621  * clean_pending_aborts cleans all cmds on this lun except
1622  * the lun reset cmd. If all cmds get cleaned, the lun reset
1623  * succeeds
1624  */
1625  if (fnic_clean_pending_aborts(fnic, sc)) {
1626  spin_lock_irqsave(io_lock, flags);
1627  io_req = (struct fnic_io_req *)CMD_SP(sc);
1628  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1629  "Device reset failed"
1630  " since could not abort all IOs\n");
1631  goto fnic_device_reset_clean;
1632  }
1633 
1634  /* Clean lun reset command */
1635  spin_lock_irqsave(io_lock, flags);
1636  io_req = (struct fnic_io_req *)CMD_SP(sc);
1637  if (io_req)
1638  /* Completed, and successful */
1639  ret = SUCCESS;
1640 
1641 fnic_device_reset_clean:
1642  if (io_req)
1643  CMD_SP(sc) = NULL;
1644 
1645  spin_unlock_irqrestore(io_lock, flags);
1646 
1647  if (io_req) {
1648  fnic_release_ioreq_buf(fnic, io_req, sc);
1649  mempool_free(io_req, fnic->io_req_pool);
1650  }
1651 
1652 fnic_device_reset_end:
1653  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1654  "Returning from device reset %s\n",
1655  (ret == SUCCESS) ?
1656  "SUCCESS" : "FAILED");
1657  return ret;
1658 }
1659 
1660 /* Clean up all IOs, clean up libFC local port */
1661 int fnic_reset(struct Scsi_Host *shost)
1662 {
1663  struct fc_lport *lp;
1664  struct fnic *fnic;
1665  int ret = SUCCESS;
1666 
1667  lp = shost_priv(shost);
1668  fnic = lport_priv(lp);
1669 
1670  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1671  "fnic_reset called\n");
1672 
1673  /*
1674  * Reset local port, this will clean up libFC exchanges,
1675  * reset remote port sessions, and if link is up, begin flogi
1676  */
1677  if (lp->tt.lport_reset(lp))
1678  ret = FAILED;
1679 
1680  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1681  "Returning from fnic reset %s\n",
1682  (ret == SUCCESS) ?
1683  "SUCCESS" : "FAILED");
1684 
1685  return ret;
1686 }
1687 
1688 /*
1689  * SCSI Error handling calls driver's eh_host_reset if all prior
1690  * error handling levels return FAILED. If host reset completes
1691  * successfully, and if link is up, then Fabric login begins.
1692  *
1693  * Host Reset is the highest level of error recovery. If this fails, then
1694  * host is offlined by SCSI.
1695  *
1696  */
1697 int fnic_host_reset(struct scsi_cmnd *sc)
1698 {
1699  int ret;
1700  unsigned long wait_host_tmo;
1701  struct Scsi_Host *shost = sc->device->host;
1702  struct fc_lport *lp = shost_priv(shost);
1703 
1704  /*
1705  * If fnic_reset is successful, wait for fabric login to complete
1706  * scsi-ml tries to send a TUR to every device if host reset is
1707  * successful, so before returning to scsi, fabric should be up
1708  */
1709  ret = fnic_reset(shost);
1710  if (ret == SUCCESS) {
1711  wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
1712  ret = FAILED;
1713  while (time_before(jiffies, wait_host_tmo)) {
1714  if ((lp->state == LPORT_ST_READY) &&
1715  (lp->link_up)) {
1716  ret = SUCCESS;
1717  break;
1718  }
1719  ssleep(1);
1720  }
1721  }
1722 
1723  return ret;
1724 }
1725 
1726 /*
1727  * This function is called from libFC when the host is being removed
1728  */
1729 void fnic_scsi_abort_io(struct fc_lport *lp)
1730 {
1731  int err = 0;
1732  unsigned long flags;
1733  enum fnic_state old_state;
1734  struct fnic *fnic = lport_priv(lp);
1735  DECLARE_COMPLETION_ONSTACK(remove_wait);
1736 
1737  /* Issue firmware reset for fnic, wait for reset to complete */
1738  spin_lock_irqsave(&fnic->fnic_lock, flags);
1739  fnic->remove_wait = &remove_wait;
1740  old_state = fnic->state;
1741  fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1742  fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
1743  spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1744 
1745  err = fnic_fw_reset_handler(fnic);
1746  if (err) {
1747  spin_lock_irqsave(&fnic->fnic_lock, flags);
1748  if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
1749  fnic->state = old_state;
1750  fnic->remove_wait = NULL;
1751  spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1752  return;
1753  }
1754 
1755  /* Wait for firmware reset to complete */
1756  wait_for_completion_timeout(&remove_wait,
1757  msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
1758 
1759  spin_lock_irqsave(&fnic->fnic_lock, flags);
1760  fnic->remove_wait = NULL;
1761  FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1762  "fnic_scsi_abort_io %s\n",
1763  (fnic->state == FNIC_IN_ETH_MODE) ?
1764  "SUCCESS" : "FAILED");
1765  spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1766 
1767 }
1768 
1769 /*
1770  * This function is called from libFC to clean up driver IO state on link down
1771  */
1772 void fnic_scsi_cleanup(struct fc_lport *lp)
1773 {
1774  unsigned long flags;
1775  enum fnic_state old_state;
1776  struct fnic *fnic = lport_priv(lp);
1777 
1778  /* issue fw reset */
1779  spin_lock_irqsave(&fnic->fnic_lock, flags);
1780  old_state = fnic->state;
1781  fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1782  fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
1783  spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1784 
1785  if (fnic_fw_reset_handler(fnic)) {
1786  spin_lock_irqsave(&fnic->fnic_lock, flags);
1787  if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
1788  fnic->state = old_state;
1789  spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1790  }
1791 
1792 }
1793 
1794 void fnic_empty_scsi_cleanup(struct fc_lport *lp)
1795 {
1796 }
1797 
1798 void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
1799 {
1800  struct fnic *fnic = lport_priv(lp);
1801 
1802  /* Non-zero sid, nothing to do */
1803  if (sid)
1804  goto call_fc_exch_mgr_reset;
1805 
1806  if (did) {
1807  fnic_rport_exch_reset(fnic, did);
1808  goto call_fc_exch_mgr_reset;
1809  }
1810 
1811  /*
1812  * sid = 0, did = 0
1813  * link down or device being removed
1814  */
1815  if (!fnic->in_remove)
1816  fnic_scsi_cleanup(lp);
1817  else
1818  fnic_scsi_abort_io(lp);
1819 
1820  /* call libFC exch mgr reset to reset its exchanges */
1821 call_fc_exch_mgr_reset:
1822  fc_exch_mgr_reset(lp, sid, did);
1823 
1824 }