Linux Kernel 3.7.1
bnx2fc_io.c
1 /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
2  * IO manager and SCSI IO processing.
3  *
4  * Copyright (c) 2008 - 2011 Broadcom Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Bhanu Prakash Gollapudi ([email protected])
11  */
12 
13 #include "bnx2fc.h"
14 
15 #define RESERVE_FREE_LIST_INDEX num_possible_cpus()
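/*
 * Free-list layout used throughout this file: lists 0 .. num_possible_cpus()-1
 * hold fast-path SCSI commands, selected by xid % num_possible_cpus(); the
 * extra list at RESERVE_FREE_LIST_INDEX holds the slow-path (ELS/TM) commands
 * carved out of the last BNX2FC_ELSTM_XIDS xids.
 */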
16 
17 static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
18  int bd_index);
19 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
20 static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
21 static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
22 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
23 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
 24  struct fcoe_fcp_rsp_payload *fcp_rsp,
 25  u8 num_rq);
26 
 27 void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
 28  unsigned int timer_msec)
29 {
30  struct bnx2fc_interface *interface = io_req->port->priv;
31 
32  if (queue_delayed_work(interface->timer_work_queue,
33  &io_req->timeout_work,
34  msecs_to_jiffies(timer_msec)))
35  kref_get(&io_req->refcount);
36 }
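/*
 * Note: the kref_get() above is taken only when the delayed work was actually
 * queued. Whoever cancels the work (the cancel_delayed_work() callers below)
 * or the timeout handler itself must drop this "timer hold" via kref_put().
 */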
37 
38 static void bnx2fc_cmd_timeout(struct work_struct *work)
39 {
40  struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
41  timeout_work.work);
42  struct fc_lport *lport;
43  struct fc_rport_priv *rdata;
44  u8 cmd_type = io_req->cmd_type;
45  struct bnx2fc_rport *tgt = io_req->tgt;
46  int logo_issued;
47  int rc;
48 
49  BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d,"
50  "req_flags = %lx\n", cmd_type, io_req->req_flags);
51 
52  spin_lock_bh(&tgt->tgt_lock);
 53  if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
 54  clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
 55  /*
 56  * ideally we should hold the io_req until the RRQ completes,
 57  * and release io_req from timeout hold.
 58  */
59  spin_unlock_bh(&tgt->tgt_lock);
60  bnx2fc_send_rrq(io_req);
61  return;
62  }
 63  if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
 64  BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
65  goto done;
66  }
67 
68  switch (cmd_type) {
69  case BNX2FC_SCSI_CMD:
 70  if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
 71  &io_req->req_flags)) {
72  /* Handle eh_abort timeout */
73  BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
74  complete(&io_req->tm_done);
 75  } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
 76  &io_req->req_flags)) {
77  /* Handle internally generated ABTS timeout */
78  BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
79  io_req->refcount.refcount.counter);
 80  if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
 81  &io_req->req_flags))) {
82 
83  lport = io_req->port->lport;
84  rdata = io_req->tgt->rdata;
85  logo_issued = test_and_set_bit(
 86  BNX2FC_FLAG_EXPL_LOGO,
 87  &tgt->flags);
88  kref_put(&io_req->refcount, bnx2fc_cmd_release);
89  spin_unlock_bh(&tgt->tgt_lock);
90 
91  /* Explicitly logo the target */
92  if (!logo_issued) {
93  BNX2FC_IO_DBG(io_req, "Explicit "
94  "logo - tgt flags = 0x%lx\n",
95  tgt->flags);
96 
97  mutex_lock(&lport->disc.disc_mutex);
98  lport->tt.rport_logoff(rdata);
99  mutex_unlock(&lport->disc.disc_mutex);
100  }
101  return;
102  }
103  } else {
104  /* Handle IO timeout */
105  BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
106  if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
107  &io_req->req_flags)) {
108  BNX2FC_IO_DBG(io_req, "IO completed before "
109  " timer expiry\n");
110  goto done;
111  }
112 
113  if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
114  &io_req->req_flags)) {
115  rc = bnx2fc_initiate_abts(io_req);
116  if (rc == SUCCESS)
117  goto done;
118  /*
119  * Explicitly logo the target if
120  * abts initiation fails
121  */
122  lport = io_req->port->lport;
123  rdata = io_req->tgt->rdata;
124  logo_issued = test_and_set_bit(
125  BNX2FC_FLAG_EXPL_LOGO,
126  &tgt->flags);
127  kref_put(&io_req->refcount, bnx2fc_cmd_release);
128  spin_unlock_bh(&tgt->tgt_lock);
129 
130  if (!logo_issued) {
131  BNX2FC_IO_DBG(io_req, "Explicit "
132  "logo - tgt flags = 0x%lx\n",
133  tgt->flags);
134 
135 
136  mutex_lock(&lport->disc.disc_mutex);
137  lport->tt.rport_logoff(rdata);
138  mutex_unlock(&lport->disc.disc_mutex);
139  }
140  return;
141  } else {
142  BNX2FC_IO_DBG(io_req, "IO already in "
143  "ABTS processing\n");
144  }
145  }
146  break;
147  case BNX2FC_ELS:
148 
149  if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
150  BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
151 
152  if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
153  &io_req->req_flags)) {
154  lport = io_req->port->lport;
155  rdata = io_req->tgt->rdata;
156  logo_issued = test_and_set_bit(
157  BNX2FC_FLAG_EXPL_LOGO,
158  &tgt->flags);
159  kref_put(&io_req->refcount, bnx2fc_cmd_release);
160  spin_unlock_bh(&tgt->tgt_lock);
161 
162  /* Explicitly logo the target */
163  if (!logo_issued) {
164  BNX2FC_IO_DBG(io_req, "Explicitly logo"
165  "(els)\n");
166  mutex_lock(&lport->disc.disc_mutex);
167  lport->tt.rport_logoff(rdata);
168  mutex_unlock(&lport->disc.disc_mutex);
169  }
170  return;
171  }
172  } else {
173  /*
174  * Handle ELS timeout.
175  * tgt_lock is used to sync compl path and timeout
176  * path. If els compl path is processing this IO, we
177  * have nothing to do here, just release the timer hold
178  */
179  BNX2FC_IO_DBG(io_req, "ELS timed out\n");
180  if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
181  &io_req->req_flags))
182  goto done;
183 
184  /* Indicate to the cb_func that this ELS has timed out */
185  set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
186 
187  if ((io_req->cb_func) && (io_req->cb_arg)) {
188  io_req->cb_func(io_req->cb_arg);
189  io_req->cb_arg = NULL;
190  }
191  }
192  break;
193  default:
194  printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
195  cmd_type);
196  break;
197  }
198 
199 done:
200  /* release the cmd that was held when timer was set */
201  kref_put(&io_req->refcount, bnx2fc_cmd_release);
202  spin_unlock_bh(&tgt->tgt_lock);
203 }
204 
205 static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
206 {
207  /* Called with host lock held */
208  struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
209 
210  /*
211  * active_cmd_queue may have other command types as well,
212  * and during flush operation, we want to error back only
213  * scsi commands.
214  */
215  if (io_req->cmd_type != BNX2FC_SCSI_CMD)
216  return;
217 
218  BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
219  if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
220  /* Do not call scsi done for this IO */
221  return;
222  }
223 
224  bnx2fc_unmap_sg_list(io_req);
225  io_req->sc_cmd = NULL;
226  if (!sc_cmd) {
227  printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
228  "IO(0x%x) already cleaned up\n",
229  io_req->xid);
230  return;
231  }
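	/*
	 * err_code is a DID_* host code; shifting it left by 16 places it in
	 * the host byte of the SCSI result (see the host_byte() debug print
	 * below).
	 */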
232  sc_cmd->result = err_code << 16;
233 
234  BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
235  sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
236  sc_cmd->allowed);
237  scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
238  sc_cmd->SCp.ptr = NULL;
239  sc_cmd->scsi_done(sc_cmd);
240 }
241 
242 struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
243  u16 min_xid, u16 max_xid)
244 {
245  struct bnx2fc_cmd_mgr *cmgr;
246  struct io_bdt *bdt_info;
247  struct bnx2fc_cmd *io_req;
248  size_t len;
249  u32 mem_size;
250  u16 xid;
251  int i;
252  int num_ios, num_pri_ios;
253  size_t bd_tbl_sz;
254  int arr_sz = num_possible_cpus() + 1;
255 
256  if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
257  printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
258  and max_xid 0x%x\n", min_xid, max_xid);
259  return NULL;
260  }
261  BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);
262 
263  num_ios = max_xid - min_xid + 1;
264  len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
265  len += sizeof(struct bnx2fc_cmd_mgr);
266 
267  cmgr = kzalloc(len, GFP_KERNEL);
268  if (!cmgr) {
269  printk(KERN_ERR PFX "failed to alloc cmgr\n");
270  return NULL;
271  }
272 
273  cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
274  arr_sz, GFP_KERNEL);
275  if (!cmgr->free_list) {
276  printk(KERN_ERR PFX "failed to alloc free_list\n");
277  goto mem_err;
278  }
279 
280  cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
281  arr_sz, GFP_KERNEL);
282  if (!cmgr->free_list_lock) {
283  printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
284  goto mem_err;
285  }
286 
287  cmgr->hba = hba;
288  cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
289 
290  for (i = 0; i < arr_sz; i++) {
291  INIT_LIST_HEAD(&cmgr->free_list[i]);
292  spin_lock_init(&cmgr->free_list_lock[i]);
293  }
294 
295  /*
296  * Pre-allocated pool of bnx2fc_cmds.
297  * Last entry in the free list array is the free list
298  * of slow path requests.
299  */
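	/*
	 * Illustrative example (counts depend on the platform): with 8
	 * possible CPUs, the first num_pri_ios commands are spread over
	 * free_list[0..7] by xid % 8, while the last BNX2FC_ELSTM_XIDS
	 * commands all land on free_list[8], the reserve list used for
	 * ELS and task management requests.
	 */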
300  xid = BNX2FC_MIN_XID;
301  num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
302  for (i = 0; i < num_ios; i++) {
303  io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
304 
305  if (!io_req) {
306  printk(KERN_ERR PFX "failed to alloc io_req\n");
307  goto mem_err;
308  }
309 
310  INIT_LIST_HEAD(&io_req->link);
311  INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
312 
313  io_req->xid = xid++;
314  if (i < num_pri_ios)
315  list_add_tail(&io_req->link,
316  &cmgr->free_list[io_req->xid %
317  num_possible_cpus()]);
318  else
319  list_add_tail(&io_req->link,
320  &cmgr->free_list[num_possible_cpus()]);
321  io_req++;
322  }
323 
324  /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
325  mem_size = num_ios * sizeof(struct io_bdt *);
326  cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
327  if (!cmgr->io_bdt_pool) {
328  printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
329  goto mem_err;
330  }
331 
332  mem_size = sizeof(struct io_bdt);
333  for (i = 0; i < num_ios; i++) {
334  cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
335  if (!cmgr->io_bdt_pool[i]) {
336  printk(KERN_ERR PFX "failed to alloc "
337  "io_bdt_pool[%d]\n", i);
338  goto mem_err;
339  }
340  }
341 
342  /* Allocate and map fcoe_bdt_ctx structures */
343  bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
344  for (i = 0; i < num_ios; i++) {
345  bdt_info = cmgr->io_bdt_pool[i];
346  bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
347  bd_tbl_sz,
348  &bdt_info->bd_tbl_dma,
349  GFP_KERNEL);
350  if (!bdt_info->bd_tbl) {
351  printk(KERN_ERR PFX "failed to alloc "
352  "bdt_tbl[%d]\n", i);
353  goto mem_err;
354  }
355  }
356 
357  return cmgr;
358 
359 mem_err:
360  bnx2fc_cmd_mgr_free(cmgr);
361  return NULL;
362 }
363 
364 void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
365 {
366  struct io_bdt *bdt_info;
367  struct bnx2fc_hba *hba = cmgr->hba;
368  size_t bd_tbl_sz;
369  u16 min_xid = BNX2FC_MIN_XID;
370  u16 max_xid = BNX2FC_MAX_XID;
371  int num_ios;
372  int i;
373 
374  num_ios = max_xid - min_xid + 1;
375 
376  /* Free fcoe_bdt_ctx structures */
377  if (!cmgr->io_bdt_pool)
378  goto free_cmd_pool;
379 
380  bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
381  for (i = 0; i < num_ios; i++) {
382  bdt_info = cmgr->io_bdt_pool[i];
383  if (bdt_info->bd_tbl) {
384  dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
385  bdt_info->bd_tbl,
386  bdt_info->bd_tbl_dma);
387  bdt_info->bd_tbl = NULL;
388  }
389  }
390 
391  /* Destroy io_bdt pool */
392  for (i = 0; i < num_ios; i++) {
393  kfree(cmgr->io_bdt_pool[i]);
394  cmgr->io_bdt_pool[i] = NULL;
395  }
396 
397  kfree(cmgr->io_bdt_pool);
398  cmgr->io_bdt_pool = NULL;
399 
400 free_cmd_pool:
401  kfree(cmgr->free_list_lock);
402 
403  /* Destroy cmd pool */
404  if (!cmgr->free_list)
405  goto free_cmgr;
406 
407  for (i = 0; i < num_possible_cpus() + 1; i++) {
408  struct bnx2fc_cmd *tmp, *io_req;
409 
410  list_for_each_entry_safe(io_req, tmp,
411  &cmgr->free_list[i], link) {
412  list_del(&io_req->link);
413  kfree(io_req);
414  }
415  }
416  kfree(cmgr->free_list);
417 free_cmgr:
418  /* Free command manager itself */
419  kfree(cmgr);
420 }
421 
422 struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
423 {
424  struct fcoe_port *port = tgt->port;
425  struct bnx2fc_interface *interface = port->priv;
426  struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
427  struct bnx2fc_cmd *io_req;
428  struct list_head *listp;
429  struct io_bdt *bd_tbl;
430  int index = RESERVE_FREE_LIST_INDEX;
431  u32 free_sqes;
432  u32 max_sqes;
433  u16 xid;
434 
435  max_sqes = tgt->max_sqes;
436  switch (type) {
437  case BNX2FC_TASK_MGMT_CMD:
438  max_sqes = BNX2FC_TM_MAX_SQES;
439  break;
440  case BNX2FC_ELS:
441  max_sqes = BNX2FC_ELS_MAX_SQES;
442  break;
443  default:
444  break;
445  }
446 
447  /*
448  * NOTE: Free list insertions and deletions are protected with
449  * cmgr lock
450  */
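	/*
	 * A slow-path command is handed out only if the reserve free list is
	 * non-empty, the session is below the SQE budget for this command
	 * class (max_sqes) and enough SQ WQEs remain free; otherwise the
	 * caller is expected to retry.
	 */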
451  spin_lock_bh(&cmd_mgr->free_list_lock[index]);
452  free_sqes = atomic_read(&tgt->free_sqes);
453  if ((list_empty(&(cmd_mgr->free_list[index]))) ||
454  (tgt->num_active_ios.counter >= max_sqes) ||
455  (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
456  BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
457  "ios(%d):sqes(%d)\n",
458  tgt->num_active_ios.counter, tgt->max_sqes);
459  if (list_empty(&(cmd_mgr->free_list[index])))
460  printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
461  spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
462  return NULL;
463  }
464 
465  listp = (struct list_head *)
466  cmd_mgr->free_list[index].next;
467  list_del_init(listp);
468  io_req = (struct bnx2fc_cmd *) listp;
469  xid = io_req->xid;
470  cmd_mgr->cmds[xid] = io_req;
471  atomic_inc(&tgt->num_active_ios);
472  atomic_dec(&tgt->free_sqes);
473  spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
474 
475  INIT_LIST_HEAD(&io_req->link);
476 
477  io_req->port = port;
478  io_req->cmd_mgr = cmd_mgr;
479  io_req->req_flags = 0;
480  io_req->cmd_type = type;
481 
482  /* Bind io_bdt for this io_req */
483  /* Have a static link between io_req and io_bdt_pool */
484  bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
485  bd_tbl->io_req = io_req;
486 
487  /* Hold the io_req against deletion */
488  kref_init(&io_req->refcount);
489  return io_req;
490 }
491 
492 struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
493 {
494  struct fcoe_port *port = tgt->port;
495  struct bnx2fc_interface *interface = port->priv;
496  struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
497  struct bnx2fc_cmd *io_req;
498  struct list_head *listp;
499  struct io_bdt *bd_tbl;
500  u32 free_sqes;
501  u32 max_sqes;
502  u16 xid;
503  int index = get_cpu();
504 
505  max_sqes = BNX2FC_SCSI_MAX_SQES;
506  /*
507  * NOTE: Free list insertions and deletions are protected with
508  * cmgr lock
509  */
510  spin_lock_bh(&cmd_mgr->free_list_lock[index]);
511  free_sqes = atomic_read(&tgt->free_sqes);
512  if ((list_empty(&cmd_mgr->free_list[index])) ||
513  (tgt->num_active_ios.counter >= max_sqes) ||
514  (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
515  spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
516  put_cpu();
517  return NULL;
518  }
519 
520  listp = (struct list_head *)
521  cmd_mgr->free_list[index].next;
522  list_del_init(listp);
523  io_req = (struct bnx2fc_cmd *) listp;
524  xid = io_req->xid;
525  cmd_mgr->cmds[xid] = io_req;
526  atomic_inc(&tgt->num_active_ios);
527  atomic_dec(&tgt->free_sqes);
528  spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
529  put_cpu();
530 
531  INIT_LIST_HEAD(&io_req->link);
532 
533  io_req->port = port;
534  io_req->cmd_mgr = cmd_mgr;
535  io_req->req_flags = 0;
536 
537  /* Bind io_bdt for this io_req */
538  /* Have a static link between io_req and io_bdt_pool */
539  bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
540  bd_tbl->io_req = io_req;
541 
542  /* Hold the io_req against deletion */
543  kref_init(&io_req->refcount);
544  return io_req;
545 }
546 
547 void bnx2fc_cmd_release(struct kref *ref)
548 {
549  struct bnx2fc_cmd *io_req = container_of(ref,
550  struct bnx2fc_cmd, refcount);
551  struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
552  int index;
553 
554  if (io_req->cmd_type == BNX2FC_SCSI_CMD)
555  index = io_req->xid % num_possible_cpus();
556  else
557  index = RESERVE_FREE_LIST_INDEX;
558 
559 
560  spin_lock_bh(&cmd_mgr->free_list_lock[index]);
561  if (io_req->cmd_type != BNX2FC_SCSI_CMD)
562  bnx2fc_free_mp_resc(io_req);
563  cmd_mgr->cmds[io_req->xid] = NULL;
564  /* Delete IO from retire queue */
565  list_del_init(&io_req->link);
566  /* Add it to the free list */
567  list_add(&io_req->link,
568  &cmd_mgr->free_list[index]);
569  atomic_dec(&io_req->tgt->num_active_ios);
570  spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
571 
572 }
573 
574 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
575 {
576  struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
577  struct bnx2fc_interface *interface = io_req->port->priv;
578  struct bnx2fc_hba *hba = interface->hba;
579  size_t sz = sizeof(struct fcoe_bd_ctx);
580 
581  /* clear tm flags */
582  mp_req->tm_flags = 0;
583  if (mp_req->mp_req_bd) {
584  dma_free_coherent(&hba->pcidev->dev, sz,
585  mp_req->mp_req_bd,
586  mp_req->mp_req_bd_dma);
587  mp_req->mp_req_bd = NULL;
588  }
589  if (mp_req->mp_resp_bd) {
590  dma_free_coherent(&hba->pcidev->dev, sz,
591  mp_req->mp_resp_bd,
592  mp_req->mp_resp_bd_dma);
593  mp_req->mp_resp_bd = NULL;
594  }
595  if (mp_req->req_buf) {
596  dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
597  mp_req->req_buf,
598  mp_req->req_buf_dma);
599  mp_req->req_buf = NULL;
600  }
601  if (mp_req->resp_buf) {
602  dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
603  mp_req->resp_buf,
604  mp_req->resp_buf_dma);
605  mp_req->resp_buf = NULL;
606  }
607 }
608 
609 int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
610 {
611  struct bnx2fc_mp_req *mp_req;
612  struct fcoe_bd_ctx *mp_req_bd;
613  struct fcoe_bd_ctx *mp_resp_bd;
614  struct bnx2fc_interface *interface = io_req->port->priv;
615  struct bnx2fc_hba *hba = interface->hba;
616  dma_addr_t addr;
617  size_t sz;
618 
619  mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
620  memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
621 
622  mp_req->req_len = sizeof(struct fcp_cmnd);
623  io_req->data_xfer_len = mp_req->req_len;
624  mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
625  &mp_req->req_buf_dma,
626  GFP_ATOMIC);
627  if (!mp_req->req_buf) {
628  printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
629  bnx2fc_free_mp_resc(io_req);
630  return FAILED;
631  }
632 
633  mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
634  &mp_req->resp_buf_dma,
635  GFP_ATOMIC);
636  if (!mp_req->resp_buf) {
637  printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
638  bnx2fc_free_mp_resc(io_req);
639  return FAILED;
640  }
641  memset(mp_req->req_buf, 0, PAGE_SIZE);
642  memset(mp_req->resp_buf, 0, PAGE_SIZE);
643 
644  /* Allocate and map mp_req_bd and mp_resp_bd */
645  sz = sizeof(struct fcoe_bd_ctx);
646  mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
647  &mp_req->mp_req_bd_dma,
648  GFP_ATOMIC);
649  if (!mp_req->mp_req_bd) {
650  printk(KERN_ERR PFX "unable to alloc MP req bd\n");
651  bnx2fc_free_mp_resc(io_req);
652  return FAILED;
653  }
654  mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
655  &mp_req->mp_resp_bd_dma,
656  GFP_ATOMIC);
657  if (!mp_req->mp_resp_bd) {
658  printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
659  bnx2fc_free_mp_resc(io_req);
660  return FAILED;
661  }
662  /* Fill bd table */
663  addr = mp_req->req_buf_dma;
664  mp_req_bd = mp_req->mp_req_bd;
665  mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
666  mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
667  mp_req_bd->buf_len = PAGE_SIZE;
668  mp_req_bd->flags = 0;
669 
670  /*
671  * MP buffer is either a task mgmt command or an ELS.
672  * So the assumption is that it consumes a single bd
673  * entry in the bd table
674  */
675  mp_resp_bd = mp_req->mp_resp_bd;
676  addr = mp_req->resp_buf_dma;
677  mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
678  mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
679  mp_resp_bd->buf_len = PAGE_SIZE;
680  mp_resp_bd->flags = 0;
681 
682  return SUCCESS;
683 }
684 
685 static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
686 {
687  struct fc_lport *lport;
688  struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
689  struct fc_rport_libfc_priv *rp = rport->dd_data;
690  struct fcoe_port *port;
691  struct bnx2fc_interface *interface;
692  struct bnx2fc_rport *tgt;
693  struct bnx2fc_cmd *io_req;
694  struct bnx2fc_mp_req *tm_req;
695  struct fcoe_task_ctx_entry *task;
696  struct fcoe_task_ctx_entry *task_page;
697  struct Scsi_Host *host = sc_cmd->device->host;
698  struct fc_frame_header *fc_hdr;
699  struct fcp_cmnd *fcp_cmnd;
700  int task_idx, index;
701  int rc = SUCCESS;
702  u16 xid;
703  u32 sid, did;
704  unsigned long start = jiffies;
705 
706  lport = shost_priv(host);
707  port = lport_priv(lport);
708  interface = port->priv;
709 
710  if (rport == NULL) {
711  printk(KERN_ERR PFX "device_reset: rport is NULL\n");
712  rc = FAILED;
713  goto tmf_err;
714  }
715 
716  rc = fc_block_scsi_eh(sc_cmd);
717  if (rc)
718  return rc;
719 
720  if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
721  printk(KERN_ERR PFX "device_reset: link is not ready\n");
722  rc = FAILED;
723  goto tmf_err;
724  }
725  /* rport and tgt are allocated together, so tgt should be non-NULL */
726  tgt = (struct bnx2fc_rport *)&rp[1];
727 
728  if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
729  printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
730  rc = FAILED;
731  goto tmf_err;
732  }
733 retry_tmf:
734  io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
735  if (!io_req) {
736  if (time_after(jiffies, start + HZ)) {
737  printk(KERN_ERR PFX "tmf: Failed TMF");
738  rc = FAILED;
739  goto tmf_err;
740  }
741  msleep(20);
742  goto retry_tmf;
743  }
744  /* Initialize rest of io_req fields */
745  io_req->sc_cmd = sc_cmd;
746  io_req->port = port;
747  io_req->tgt = tgt;
748 
749  tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
750 
751  rc = bnx2fc_init_mp_req(io_req);
752  if (rc == FAILED) {
753  printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
754  spin_lock_bh(&tgt->tgt_lock);
755  kref_put(&io_req->refcount, bnx2fc_cmd_release);
756  spin_unlock_bh(&tgt->tgt_lock);
757  goto tmf_err;
758  }
759 
760  /* Set TM flags */
761  io_req->io_req_flags = 0;
762  tm_req->tm_flags = tm_flags;
763 
764  /* Fill FCP_CMND */
765  bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
766  fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
767  memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
768  fcp_cmnd->fc_dl = 0;
769 
770  /* Fill FC header */
771  fc_hdr = &(tm_req->req_fc_hdr);
772  sid = tgt->sid;
773  did = rport->port_id;
774  __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
775  FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
776  FC_FC_SEQ_INIT, 0);
777  /* Obtain exchange id */
778  xid = io_req->xid;
779 
780  BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
781  task_idx = xid/BNX2FC_TASKS_PER_PAGE;
782  index = xid % BNX2FC_TASKS_PER_PAGE;
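	/*
	 * The xid selects a page of task context entries and an offset within
	 * that page. For example, if BNX2FC_TASKS_PER_PAGE were 256, xid 0x410
	 * would map to task_ctx page 4, entry 0x10. The same mapping is used
	 * for every command type in this file.
	 */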
783 
784  /* Initialize task context for this IO request */
785  task_page = (struct fcoe_task_ctx_entry *)
786  interface->hba->task_ctx[task_idx];
787  task = &(task_page[index]);
788  bnx2fc_init_mp_task(io_req, task);
789 
790  sc_cmd->SCp.ptr = (char *)io_req;
791 
792  /* Obtain free SQ entry */
793  spin_lock_bh(&tgt->tgt_lock);
794  bnx2fc_add_2_sq(tgt, xid);
795 
796  /* Enqueue the io_req to active_tm_queue */
797  io_req->on_tmf_queue = 1;
798  list_add_tail(&io_req->link, &tgt->active_tm_queue);
799 
800  init_completion(&io_req->tm_done);
801  io_req->wait_for_comp = 1;
802 
803  /* Ring doorbell */
804  bnx2fc_ring_doorbell(tgt);
805  spin_unlock_bh(&tgt->tgt_lock);
806 
807  rc = wait_for_completion_timeout(&io_req->tm_done,
808  BNX2FC_TM_TIMEOUT * HZ);
809  spin_lock_bh(&tgt->tgt_lock);
810 
811  io_req->wait_for_comp = 0;
812  if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
813  set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
814  if (io_req->on_tmf_queue) {
815  list_del_init(&io_req->link);
816  io_req->on_tmf_queue = 0;
817  }
818  io_req->wait_for_comp = 1;
819  bnx2fc_initiate_cleanup(io_req);
820  spin_unlock_bh(&tgt->tgt_lock);
821  rc = wait_for_completion_timeout(&io_req->tm_done,
822  BNX2FC_FW_TIMEOUT);
823  spin_lock_bh(&tgt->tgt_lock);
824  io_req->wait_for_comp = 0;
825  if (!rc)
826  kref_put(&io_req->refcount, bnx2fc_cmd_release);
827  }
828 
829  spin_unlock_bh(&tgt->tgt_lock);
830 
831  if (!rc) {
832  BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
833  rc = FAILED;
834  } else {
835  BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
836  rc = SUCCESS;
837  }
838 tmf_err:
839  return rc;
840 }
841 
842 int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
843 {
844  struct fc_lport *lport;
845  struct bnx2fc_rport *tgt = io_req->tgt;
846  struct fc_rport *rport = tgt->rport;
847  struct fc_rport_priv *rdata = tgt->rdata;
848  struct bnx2fc_interface *interface;
849  struct fcoe_port *port;
850  struct bnx2fc_cmd *abts_io_req;
851  struct fcoe_task_ctx_entry *task;
852  struct fcoe_task_ctx_entry *task_page;
853  struct fc_frame_header *fc_hdr;
854  struct bnx2fc_mp_req *abts_req;
855  int task_idx, index;
856  u32 sid, did;
857  u16 xid;
858  int rc = SUCCESS;
859  u32 r_a_tov = rdata->r_a_tov;
860 
861  /* called with tgt_lock held */
862  BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
863 
864  port = io_req->port;
865  interface = port->priv;
866  lport = port->lport;
867 
868  if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
869  printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
870  rc = FAILED;
871  goto abts_err;
872  }
873 
874  if (rport == NULL) {
875  printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
876  rc = FAILED;
877  goto abts_err;
878  }
879 
880  if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
881  printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
882  rc = FAILED;
883  goto abts_err;
884  }
885 
886  abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
887  if (!abts_io_req) {
888  printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
889  rc = FAILED;
890  goto abts_err;
891  }
892 
893  /* Initialize rest of io_req fields */
894  abts_io_req->sc_cmd = NULL;
895  abts_io_req->port = port;
896  abts_io_req->tgt = tgt;
897  abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
898 
899  abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
900  memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
901 
902  /* Fill FC header */
903  fc_hdr = &(abts_req->req_fc_hdr);
904 
905  /* Obtain oxid and rxid for the original exchange to be aborted */
906  fc_hdr->fh_ox_id = htons(io_req->xid);
907  fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);
908 
909  sid = tgt->sid;
910  did = rport->port_id;
911 
912  __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
913  FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
914  FC_FC_SEQ_INIT, 0);
915 
916  xid = abts_io_req->xid;
917  BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
918  task_idx = xid/BNX2FC_TASKS_PER_PAGE;
919  index = xid % BNX2FC_TASKS_PER_PAGE;
920 
921  /* Initialize task context for this IO request */
922  task_page = (struct fcoe_task_ctx_entry *)
923  interface->hba->task_ctx[task_idx];
924  task = &(task_page[index]);
925  bnx2fc_init_mp_task(abts_io_req, task);
926 
927  /*
928  * ABTS task is a temporary task that will be cleaned up
929  * irrespective of ABTS response. We need to start the timer
930  * for the original exchange, as the CQE is posted for the original
931  * IO request.
932  *
933  * Timer for ABTS is started only when it is originated by a
934  * TM request. For the ABTS issued as part of ULP timeout,
935  * scsi-ml maintains the timers.
936  */
937 
938  /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
939  bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
940 
941  /* Obtain free SQ entry */
942  bnx2fc_add_2_sq(tgt, xid);
943 
944  /* Ring doorbell */
945  bnx2fc_ring_doorbell(tgt);
946 
947 abts_err:
948  return rc;
949 }
950 
951 int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
952  enum fc_rctl r_ctl)
953 {
954  struct fc_lport *lport;
955  struct bnx2fc_rport *tgt = orig_io_req->tgt;
956  struct bnx2fc_interface *interface;
957  struct fcoe_port *port;
958  struct bnx2fc_cmd *seq_clnp_req;
959  struct fcoe_task_ctx_entry *task;
960  struct fcoe_task_ctx_entry *task_page;
961  struct bnx2fc_els_cb_arg *cb_arg = NULL;
962  int task_idx, index;
963  u16 xid;
964  int rc = 0;
965 
966  BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
967  orig_io_req->xid);
968  kref_get(&orig_io_req->refcount);
969 
970  port = orig_io_req->port;
971  interface = port->priv;
972  lport = port->lport;
973 
974  cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
975  if (!cb_arg) {
976  printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
977  rc = -ENOMEM;
978  goto cleanup_err;
979  }
980 
981  seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
982  if (!seq_clnp_req) {
983  printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
984  rc = -ENOMEM;
985  kfree(cb_arg);
986  goto cleanup_err;
987  }
988  /* Initialize rest of io_req fields */
989  seq_clnp_req->sc_cmd = NULL;
990  seq_clnp_req->port = port;
991  seq_clnp_req->tgt = tgt;
992  seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
993 
994  xid = seq_clnp_req->xid;
995 
996  task_idx = xid/BNX2FC_TASKS_PER_PAGE;
997  index = xid % BNX2FC_TASKS_PER_PAGE;
998 
999  /* Initialize task context for this IO request */
1000  task_page = (struct fcoe_task_ctx_entry *)
1001  interface->hba->task_ctx[task_idx];
1002  task = &(task_page[index]);
1003  cb_arg->aborted_io_req = orig_io_req;
1004  cb_arg->io_req = seq_clnp_req;
1005  cb_arg->r_ctl = r_ctl;
1006  cb_arg->offset = offset;
1007  seq_clnp_req->cb_arg = cb_arg;
1008 
1009  printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
1010  bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
1011 
1012  /* Obtain free SQ entry */
1013  bnx2fc_add_2_sq(tgt, xid);
1014 
1015  /* Ring doorbell */
1016  bnx2fc_ring_doorbell(tgt);
1017 cleanup_err:
1018  return rc;
1019 }
1020 
1021 int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
1022 {
1023  struct fc_lport *lport;
1024  struct bnx2fc_rport *tgt = io_req->tgt;
1025  struct bnx2fc_interface *interface;
1026  struct fcoe_port *port;
1027  struct bnx2fc_cmd *cleanup_io_req;
1028  struct fcoe_task_ctx_entry *task;
1029  struct fcoe_task_ctx_entry *task_page;
1030  int task_idx, index;
1031  u16 xid, orig_xid;
1032  int rc = 0;
1033 
1034  /* ASSUMPTION: called with tgt_lock held */
1035  BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
1036 
1037  port = io_req->port;
1038  interface = port->priv;
1039  lport = port->lport;
1040 
1041  cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
1042  if (!cleanup_io_req) {
1043  printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
1044  rc = -1;
1045  goto cleanup_err;
1046  }
1047 
1048  /* Initialize rest of io_req fields */
1049  cleanup_io_req->sc_cmd = NULL;
1050  cleanup_io_req->port = port;
1051  cleanup_io_req->tgt = tgt;
1052  cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
1053 
1054  xid = cleanup_io_req->xid;
1055 
1056  task_idx = xid/BNX2FC_TASKS_PER_PAGE;
1057  index = xid % BNX2FC_TASKS_PER_PAGE;
1058 
1059  /* Initialize task context for this IO request */
1060  task_page = (struct fcoe_task_ctx_entry *)
1061  interface->hba->task_ctx[task_idx];
1062  task = &(task_page[index]);
1063  orig_xid = io_req->xid;
1064 
1065  BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
1066 
1067  bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
1068 
1069  /* Obtain free SQ entry */
1070  bnx2fc_add_2_sq(tgt, xid);
1071 
1072  /* Ring doorbell */
1073  bnx2fc_ring_doorbell(tgt);
1074 
1075 cleanup_err:
1076  return rc;
1077 }
1078 
1087 int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
1088 {
1089  return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
1090 }
1091 
1100 int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1101 {
1102  return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
1103 }
1104 
1105 int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
1106 {
1107  struct bnx2fc_rport *tgt = io_req->tgt;
1108  struct fc_rport_priv *rdata = tgt->rdata;
1109  int logo_issued;
1110  int rc = SUCCESS;
1111  int wait_cnt = 0;
1112 
1113  BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
1114  tgt->flags);
1115  logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
1116  &tgt->flags);
1117  io_req->wait_for_comp = 1;
1118  bnx2fc_initiate_cleanup(io_req);
1119 
1120  spin_unlock_bh(&tgt->tgt_lock);
1121 
1122  wait_for_completion(&io_req->tm_done);
1123 
1124  io_req->wait_for_comp = 0;
1125  /*
1126  * release the reference taken in eh_abort to allow the
1127  * target to re-login after flushing IOs
1128  */
1129  kref_put(&io_req->refcount, bnx2fc_cmd_release);
1130 
1131  if (!logo_issued) {
1133  mutex_lock(&lport->disc.disc_mutex);
1134  lport->tt.rport_logoff(rdata);
1135  mutex_unlock(&lport->disc.disc_mutex);
1136  do {
1137  msleep(BNX2FC_RELOGIN_WAIT_TIME);
1138  if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
1139  rc = FAILED;
1140  break;
1141  }
1142  } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
1143  }
1144  spin_lock_bh(&tgt->tgt_lock);
1145  return rc;
1146 }
1155 int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1156 {
1157  struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1158  struct fc_rport_libfc_priv *rp = rport->dd_data;
1159  struct bnx2fc_cmd *io_req;
1160  struct fc_lport *lport;
1161  struct bnx2fc_rport *tgt;
1162  int rc = FAILED;
1163 
1164 
1165  rc = fc_block_scsi_eh(sc_cmd);
1166  if (rc)
1167  return rc;
1168 
1169  lport = shost_priv(sc_cmd->device->host);
1170  if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1171  printk(KERN_ERR PFX "eh_abort: link not ready\n");
1172  return rc;
1173  }
1174 
1175  tgt = (struct bnx2fc_rport *)&rp[1];
1176 
1177  BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
1178 
1179  spin_lock_bh(&tgt->tgt_lock);
1180  io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
1181  if (!io_req) {
1182  /* Command might have just completed */
1183  printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
1184  spin_unlock_bh(&tgt->tgt_lock);
1185  return SUCCESS;
1186  }
1187  BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
1188  io_req->refcount.refcount.counter);
1189 
1190  /* Hold IO request across abort processing */
1191  kref_get(&io_req->refcount);
1192 
1193  BUG_ON(tgt != io_req->tgt);
1194 
1195  /* Remove the io_req from the active_q. */
1196  /*
1197  * Task Mgmt functions (LUN RESET & TGT RESET) will not
1198  * issue an ABTS on this particular IO req, as the
1199  * io_req is no longer in the active_q.
1200  */
1201  if (tgt->flush_in_prog) {
1202  printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1203  "flush in progress\n", io_req->xid);
1204  kref_put(&io_req->refcount, bnx2fc_cmd_release);
1205  spin_unlock_bh(&tgt->tgt_lock);
1206  return SUCCESS;
1207  }
1208 
1209  if (io_req->on_active_queue == 0) {
1210  printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1211  "not on active_q\n", io_req->xid);
1212  /*
1213  * This condition can happen only due to the FW bug,
1214  * where we do not receive cleanup response from
1215  * the FW. Handle this case gracefully by erroring
1216  * back the IO request to SCSI-ml
1217  */
1218  bnx2fc_scsi_done(io_req, DID_ABORT);
1219 
1220  kref_put(&io_req->refcount, bnx2fc_cmd_release);
1221  spin_unlock_bh(&tgt->tgt_lock);
1222  return SUCCESS;
1223  }
1224 
1225  /*
1226  * Only eh_abort processing will remove the IO from
1227  * active_cmd_q before processing the request. this is
1228  * done to avoid race conditions between IOs aborted
1229  * as part of task management completion and eh_abort
1230  * processing
1231  */
1232  list_del_init(&io_req->link);
1233  io_req->on_active_queue = 0;
1234  /* Move IO req to retire queue */
1235  list_add_tail(&io_req->link, &tgt->io_retire_queue);
1236 
1237  init_completion(&io_req->tm_done);
1238 
1239  if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
1240  printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1241  "already in abts processing\n", io_req->xid);
1242  if (cancel_delayed_work(&io_req->timeout_work))
1243  kref_put(&io_req->refcount,
1244  bnx2fc_cmd_release); /* drop timer hold */
1245  rc = bnx2fc_expl_logo(lport, io_req);
1246  goto out;
1247  }
1248 
1249  /* Cancel the current timer running on this io_req */
1250  if (cancel_delayed_work(&io_req->timeout_work))
1251  kref_put(&io_req->refcount,
1252  bnx2fc_cmd_release); /* drop timer hold */
1253  set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1254  io_req->wait_for_comp = 1;
1255  rc = bnx2fc_initiate_abts(io_req);
1256  if (rc == FAILED) {
1257  bnx2fc_initiate_cleanup(io_req);
1258  spin_unlock_bh(&tgt->tgt_lock);
1259  wait_for_completion(&io_req->tm_done);
1260  spin_lock_bh(&tgt->tgt_lock);
1261  io_req->wait_for_comp = 0;
1262  goto done;
1263  }
1264  spin_unlock_bh(&tgt->tgt_lock);
1265 
1266  wait_for_completion(&io_req->tm_done);
1267 
1268  spin_lock_bh(&tgt->tgt_lock);
1269  io_req->wait_for_comp = 0;
1270  if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1271  &io_req->req_flags))) {
1272  /* Let the scsi-ml try to recover this command */
1273  printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
1274  io_req->xid);
1275  rc = bnx2fc_expl_logo(lport, io_req);
1276  goto out;
1277  } else {
1278  /*
1279  * We come here even when there was a race condition
1280  * between timeout and abts completion, and abts
1281  * completion happens just in time.
1282  */
1283  BNX2FC_IO_DBG(io_req, "abort succeeded\n");
1284  rc = SUCCESS;
1285  bnx2fc_scsi_done(io_req, DID_ABORT);
1286  kref_put(&io_req->refcount, bnx2fc_cmd_release);
1287  }
1288 done:
1289  /* release the reference taken in eh_abort */
1290  kref_put(&io_req->refcount, bnx2fc_cmd_release);
1291 out:
1292  spin_unlock_bh(&tgt->tgt_lock);
1293  return rc;
1294 }
1295 
1296 void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
1297  struct fcoe_task_ctx_entry *task,
1298  u8 rx_state)
1299 {
1300  struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
1301  struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
1302  u32 offset = cb_arg->offset;
1303  enum fc_rctl r_ctl = cb_arg->r_ctl;
1304  int rc = 0;
1305  struct bnx2fc_rport *tgt = orig_io_req->tgt;
1306 
1307  BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
1308  "cmd_type = %d\n",
1309  seq_clnp_req->xid, seq_clnp_req->cmd_type);
1310 
1311  if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
1312  printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
1313  seq_clnp_req->xid);
1314  goto free_cb_arg;
1315  }
1316 
1317  spin_unlock_bh(&tgt->tgt_lock);
1318  rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
1319  spin_lock_bh(&tgt->tgt_lock);
1320 
1321  if (rc)
1322  printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
1323  " IO will abort\n");
1324  seq_clnp_req->cb_arg = NULL;
1325  kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
1326 free_cb_arg:
1327  kfree(cb_arg);
1328  return;
1329 }
1330 
1331 void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
1332  struct fcoe_task_ctx_entry *task,
1333  u8 num_rq)
1334 {
1335  BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
1336  "refcnt = %d, cmd_type = %d\n",
1337  io_req->refcount.refcount.counter, io_req->cmd_type);
1338  bnx2fc_scsi_done(io_req, DID_ERROR);
1339  kref_put(&io_req->refcount, bnx2fc_cmd_release);
1340  if (io_req->wait_for_comp)
1341  complete(&io_req->tm_done);
1342 }
1343 
1344 void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
1345  struct fcoe_task_ctx_entry *task,
1346  u8 num_rq)
1347 {
1348  u32 r_ctl;
1349  u32 r_a_tov = FC_DEF_R_A_TOV;
1350  u8 issue_rrq = 0;
1351  struct bnx2fc_rport *tgt = io_req->tgt;
1352 
1353  BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
1354  "refcnt = %d, cmd_type = %d\n",
1355  io_req->xid,
1356  io_req->refcount.refcount.counter, io_req->cmd_type);
1357 
1357 
1358  if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1359  &io_req->req_flags)) {
1360  BNX2FC_IO_DBG(io_req, "Timer context finished processing"
1361  " this io\n");
1362  return;
1363  }
1364 
1365  /* Do not issue RRQ as this IO is already cleaned up */
1366  if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
1367  &io_req->req_flags))
1368  goto io_compl;
1369 
1370  /*
1371  * For ABTS issued due to SCSI eh_abort_handler, timeout
1372  * values are maintained by scsi-ml itself. Cancel timeout
1373  * in case ABTS issued as part of task management function
1374  * or due to FW error.
1375  */
1376  if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
1377  if (cancel_delayed_work(&io_req->timeout_work))
1378  kref_put(&io_req->refcount,
1379  bnx2fc_cmd_release); /* drop timer hold */
1380 
1381  r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;
1382 
1383  switch (r_ctl) {
1384  case FC_RCTL_BA_ACC:
1385  /*
1386  * Don't release this cmd yet. It will be released
1387  * after we get RRQ response
1388  */
1389  BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
1390  issue_rrq = 1;
1391  break;
1392 
1393  case FC_RCTL_BA_RJT:
1394  BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
1395  break;
1396  default:
1397  printk(KERN_ERR PFX "Unknown ABTS response\n");
1398  break;
1399  }
1400 
1401  if (issue_rrq) {
1402  BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
1403  set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
1404  }
1405  set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
1406  bnx2fc_cmd_timer_set(io_req, r_a_tov);
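	/*
	 * Re-arming the timer hands the io_req back to bnx2fc_cmd_timeout()
	 * after R_A_TOV, which either sends the RRQ (BNX2FC_FLAG_ISSUE_RRQ)
	 * or simply retires the exchange (BNX2FC_FLAG_RETIRE_OXID).
	 */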
1407 
1408 io_compl:
1409  if (io_req->wait_for_comp) {
1410  if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1411  &io_req->req_flags))
1412  complete(&io_req->tm_done);
1413  } else {
1414  /*
1415  * We end up here when ABTS is issued as
1416  * in asynchronous context, i.e., as part
1417  * of task management completion, or
1418  * when FW error is received or when the
1419  * ABTS is issued when the IO is timed
1420  * out.
1421  */
1422 
1423  if (io_req->on_active_queue) {
1424  list_del_init(&io_req->link);
1425  io_req->on_active_queue = 0;
1426  /* Move IO req to retire queue */
1427  list_add_tail(&io_req->link, &tgt->io_retire_queue);
1428  }
1429  bnx2fc_scsi_done(io_req, DID_ERROR);
1430  kref_put(&io_req->refcount, bnx2fc_cmd_release);
1431  }
1432 }
1433 
1434 static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
1435 {
1436  struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1437  struct bnx2fc_rport *tgt = io_req->tgt;
1438  struct bnx2fc_cmd *cmd, *tmp;
1439  int tm_lun = sc_cmd->device->lun;
1440  int rc = 0;
1441  int lun;
1442 
1443  /* called with tgt_lock held */
1444  BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
1445  /*
1446  * Walk thru the active_ios queue and ABORT the IO
1447  * that matches with the LUN that was reset
1448  */
1449  list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
1450  BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
1451  lun = cmd->sc_cmd->device->lun;
1452  if (lun == tm_lun) {
1453  /* Initiate ABTS on this cmd */
1454  if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
1455  &cmd->req_flags)) {
1456  /* cancel the IO timeout */
1457  if (cancel_delayed_work(&io_req->timeout_work))
1458  kref_put(&io_req->refcount,
1459  bnx2fc_cmd_release);
1460  /* timer hold */
1461  rc = bnx2fc_initiate_abts(cmd);
1462  /* abts shouldn't fail in this context */
1463  WARN_ON(rc != SUCCESS);
1464  } else
1465  printk(KERN_ERR PFX "lun_rst: abts already in"
1466  " progress for this IO 0x%x\n",
1467  cmd->xid);
1468  }
1469  }
1470 }
1471 
1472 static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
1473 {
1474  struct bnx2fc_rport *tgt = io_req->tgt;
1475  struct bnx2fc_cmd *cmd, *tmp;
1476  int rc = 0;
1477 
1478  /* called with tgt_lock held */
1479  BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
1480  /*
1481  * Walk through the active_ios queue and ABORT each IO
1482  * belonging to the target that was reset
1483  */
1484  list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
1485  BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
1486  /* Initiate ABTS */
1487  if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
1488  &cmd->req_flags)) {
1489  /* cancel the IO timeout */
1490  if (cancel_delayed_work(&io_req->timeout_work))
1491  kref_put(&io_req->refcount,
1492  bnx2fc_cmd_release); /* timer hold */
1493  rc = bnx2fc_initiate_abts(cmd);
1494  /* abts shouldn't fail in this context */
1495  WARN_ON(rc != SUCCESS);
1496 
1497  } else
1498  printk(KERN_ERR PFX "tgt_rst: abts already in progress"
1499  " for this IO 0x%x\n", cmd->xid);
1500  }
1501 }
1502 
1503 void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
1504  struct fcoe_task_ctx_entry *task, u8 num_rq)
1505 {
1506  struct bnx2fc_mp_req *tm_req;
1507  struct fc_frame_header *fc_hdr;
1508  struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1509  u64 *hdr;
1510  u64 *temp_hdr;
1511  void *rsp_buf;
1512 
1513  /* Called with tgt_lock held */
1514  BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
1515 
1516  if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
1517  set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
1518  else {
1519  /* TM has already timed out and we got
1520  * delayed completion. Ignore completion
1521  * processing.
1522  */
1523  return;
1524  }
1525 
1526  tm_req = &(io_req->mp_req);
1527  fc_hdr = &(tm_req->resp_fc_hdr);
1528  hdr = (u64 *)fc_hdr;
1529  temp_hdr = (u64 *)
1530  &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
1531  hdr[0] = cpu_to_be64(temp_hdr[0]);
1532  hdr[1] = cpu_to_be64(temp_hdr[1]);
1533  hdr[2] = cpu_to_be64(temp_hdr[2]);
1534 
1535  tm_req->resp_len =
1536  task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
1537 
1538  rsp_buf = tm_req->resp_buf;
1539 
1540  if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
1541  bnx2fc_parse_fcp_rsp(io_req,
1542  (struct fcoe_fcp_rsp_payload *)
1543  rsp_buf, num_rq);
1544  if (io_req->fcp_rsp_code == 0) {
1545  /* TM successful */
1546  if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
1547  bnx2fc_lun_reset_cmpl(io_req);
1548  else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
1549  bnx2fc_tgt_reset_cmpl(io_req);
1550  }
1551  } else {
1552  printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
1553  fc_hdr->fh_r_ctl);
1554  }
1555  if (!sc_cmd->SCp.ptr) {
1556  printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
1557  return;
1558  }
1559  switch (io_req->fcp_status) {
1560  case FC_GOOD:
1561  if (io_req->cdb_status == 0) {
1562  /* Good IO completion */
1563  sc_cmd->result = DID_OK << 16;
1564  } else {
1565  /* Transport status is good, SCSI status not good */
1566  sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1567  }
1568  if (io_req->fcp_resid)
1569  scsi_set_resid(sc_cmd, io_req->fcp_resid);
1570  break;
1571 
1572  default:
1573  BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
1574  io_req->fcp_status);
1575  break;
1576  }
1577 
1578  sc_cmd = io_req->sc_cmd;
1579  io_req->sc_cmd = NULL;
1580 
1581  /* check if the io_req exists in tgt's tmf_q */
1582  if (io_req->on_tmf_queue) {
1583 
1584  list_del_init(&io_req->link);
1585  io_req->on_tmf_queue = 0;
1586  } else {
1587 
1588  printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
1589  return;
1590  }
1591 
1592  sc_cmd->SCp.ptr = NULL;
1593  sc_cmd->scsi_done(sc_cmd);
1594 
1595  kref_put(&io_req->refcount, bnx2fc_cmd_release);
1596  if (io_req->wait_for_comp) {
1597  BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
1598  complete(&io_req->tm_done);
1599  }
1600 }
1601 
1602 static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
1603  int bd_index)
1604 {
1605  struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1606  int frag_size, sg_frags;
1607 
1608  sg_frags = 0;
1609  while (sg_len) {
1610  if (sg_len >= BNX2FC_BD_SPLIT_SZ)
1611  frag_size = BNX2FC_BD_SPLIT_SZ;
1612  else
1613  frag_size = sg_len;
1614  bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
1615  bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
1616  bd[bd_index + sg_frags].buf_len = (u16)frag_size;
1617  bd[bd_index + sg_frags].flags = 0;
1618 
1619  addr += (u64) frag_size;
1620  sg_frags++;
1621  sg_len -= frag_size;
1622  }
1623  return sg_frags;
1624 
1625 }
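/*
 * Example of the split above (sizes are illustrative): if BNX2FC_BD_SPLIT_SZ
 * were 32 KB, a 100 KB scatterlist segment would be described by three 32 KB
 * BDs plus one 4 KB BD, so bnx2fc_split_bd() would return sg_frags = 4.
 */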
1626 
1627 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
1628 {
1629  struct bnx2fc_interface *interface = io_req->port->priv;
1630  struct bnx2fc_hba *hba = interface->hba;
1631  struct scsi_cmnd *sc = io_req->sc_cmd;
1632  struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1633  struct scatterlist *sg;
1634  int byte_count = 0;
1635  int sg_count = 0;
1636  int bd_count = 0;
1637  int sg_frags;
1638  unsigned int sg_len;
1639  u64 addr;
1640  int i;
1641 
1642  sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
1643  scsi_sg_count(sc), sc->sc_data_direction);
1644  scsi_for_each_sg(sc, sg, sg_count, i) {
1645  sg_len = sg_dma_len(sg);
1646  addr = sg_dma_address(sg);
1647  if (sg_len > BNX2FC_MAX_BD_LEN) {
1648  sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
1649  bd_count);
1650  } else {
1651 
1652  sg_frags = 1;
1653  bd[bd_count].buf_addr_lo = addr & 0xffffffff;
1654  bd[bd_count].buf_addr_hi = addr >> 32;
1655  bd[bd_count].buf_len = (u16)sg_len;
1656  bd[bd_count].flags = 0;
1657  }
1658  bd_count += sg_frags;
1659  byte_count += sg_len;
1660  }
1661  if (byte_count != scsi_bufflen(sc))
1662  printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
1663  "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
1664  io_req->xid);
1665  return bd_count;
1666 }
1667 
1668 static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
1669 {
1670  struct scsi_cmnd *sc = io_req->sc_cmd;
1671  struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1672  int bd_count;
1673 
1674  if (scsi_sg_count(sc)) {
1675  bd_count = bnx2fc_map_sg(io_req);
1676  if (bd_count == 0)
1677  return -ENOMEM;
1678  } else {
1679  bd_count = 0;
1680  bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
1681  bd[0].buf_len = bd[0].flags = 0;
1682  }
1683  io_req->bd_tbl->bd_valid = bd_count;
1684 
1685  return 0;
1686 }
1687 
1688 static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
1689 {
1690  struct scsi_cmnd *sc = io_req->sc_cmd;
1691 
1692  if (io_req->bd_tbl->bd_valid && sc) {
1693  scsi_dma_unmap(sc);
1694  io_req->bd_tbl->bd_valid = 0;
1695  }
1696 }
1697 
1698 void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
1699  struct fcp_cmnd *fcp_cmnd)
1700 {
1701  struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1702  char tag[2];
1703 
1704  memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
1705 
1706  int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
1707 
1708  fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
1709  memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
1710 
1711  fcp_cmnd->fc_cmdref = 0;
1712  fcp_cmnd->fc_pri_ta = 0;
1713  fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
1714  fcp_cmnd->fc_flags = io_req->io_req_flags;
1715 
1716  if (scsi_populate_tag_msg(sc_cmd, tag)) {
1717  switch (tag[0]) {
1718  case HEAD_OF_QUEUE_TAG:
1719  fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
1720  break;
1721  case ORDERED_QUEUE_TAG:
1722  fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
1723  break;
1724  default:
1725  fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
1726  break;
1727  }
1728  } else {
1729  fcp_cmnd->fc_pri_ta = 0;
1730  }
1731 }
1732 
1733 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
1734  struct fcoe_fcp_rsp_payload *fcp_rsp,
1735  u8 num_rq)
1736 {
1737  struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1738  struct bnx2fc_rport *tgt = io_req->tgt;
1739  u8 rsp_flags = fcp_rsp->fcp_flags.flags;
1740  u32 rq_buff_len = 0;
1741  int i;
1742  unsigned char *rq_data;
1743  unsigned char *dummy;
1744  int fcp_sns_len = 0;
1745  int fcp_rsp_len = 0;
1746 
1747  io_req->fcp_status = FC_GOOD;
1748  io_req->fcp_resid = fcp_rsp->fcp_resid;
1749 
1750  io_req->scsi_comp_flags = rsp_flags;
1751  CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1752  fcp_rsp->scsi_status_code;
1753 
1754  /* Fetch fcp_rsp_info and fcp_sns_info if available */
1755  if (num_rq) {
1756 
1757  /*
1758  * We do not anticipate num_rq >1, as the linux defined
1759  * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO
1760  * 256 bytes of single rq buffer is good enough to hold this.
1761  */
1762 
1763  if (rsp_flags &
1764  FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
1765  fcp_rsp_len = rq_buff_len
1766  = fcp_rsp->fcp_rsp_len;
1767  }
1768 
1769  if (rsp_flags &
1770  FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
1771  fcp_sns_len = fcp_rsp->fcp_sns_len;
1772  rq_buff_len += fcp_rsp->fcp_sns_len;
1773  }
1774 
1775  io_req->fcp_rsp_len = fcp_rsp_len;
1776  io_req->fcp_sns_len = fcp_sns_len;
1777 
1778  if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
1779  /* Invalid sense length. */
1780  printk(KERN_ERR PFX "invalid sns length %d\n",
1781  rq_buff_len);
1782  /* reset rq_buff_len */
1783  rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
1784  }
1785 
1786  rq_data = bnx2fc_get_next_rqe(tgt, 1);
1787 
1788  if (num_rq > 1) {
1789  /* We do not need extra sense data */
1790  for (i = 1; i < num_rq; i++)
1791  dummy = bnx2fc_get_next_rqe(tgt, 1);
1792  }
1793 
1794  /* fetch fcp_rsp_code */
1795  if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1796  /* Only for task management function */
1797  io_req->fcp_rsp_code = rq_data[3];
1798  printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
1799  io_req->fcp_rsp_code);
1800  }
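	/*
	 * rq_data[3] is the RSP_CODE field of FCP_RSP_INFO (bytes 0-2 are
	 * reserved), which is why only offset 3 is examined for task
	 * management responses.
	 */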
1801 
1802  /* fetch sense data */
1803  rq_data += fcp_rsp_len;
1804 
1805  if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1806  printk(KERN_ERR PFX "Truncating sense buffer\n");
1807  fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1808  }
1809 
1809 
1810  memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1811  if (fcp_sns_len)
1812  memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
1813 
1814  /* return RQ entries */
1815  for (i = 0; i < num_rq; i++)
1816  bnx2fc_return_rqe(tgt, 1);
1817  }
1818 }
1819 
1828 int bnx2fc_queuecommand(struct Scsi_Host *host,
1829  struct scsi_cmnd *sc_cmd)
1830 {
1831  struct fc_lport *lport = shost_priv(host);
1832  struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1833  struct fc_rport_libfc_priv *rp = rport->dd_data;
1834  struct bnx2fc_rport *tgt;
1835  struct bnx2fc_cmd *io_req;
1836  int rc = 0;
1837  int rval;
1838 
1839  rval = fc_remote_port_chkready(rport);
1840  if (rval) {
1841  sc_cmd->result = rval;
1842  sc_cmd->scsi_done(sc_cmd);
1843  return 0;
1844  }
1845 
1846  if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1847  rc = SCSI_MLQUEUE_HOST_BUSY;
1848  goto exit_qcmd;
1849  }
1850 
1851  /* rport and tgt are allocated together, so tgt should be non-NULL */
1852  tgt = (struct bnx2fc_rport *)&rp[1];
1853 
1854  if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
1855  /*
1856  * Session is not offloaded yet. Let SCSI-ml retry
1857  * the command.
1858  */
1859  rc = SCSI_MLQUEUE_TARGET_BUSY;
1860  goto exit_qcmd;
1861  }
1862 
1863  io_req = bnx2fc_cmd_alloc(tgt);
1864  if (!io_req) {
1865  rc = SCSI_MLQUEUE_HOST_BUSY;
1866  goto exit_qcmd;
1867  }
1868  io_req->sc_cmd = sc_cmd;
1869 
1870  if (bnx2fc_post_io_req(tgt, io_req)) {
1871  printk(KERN_ERR PFX "Unable to post io_req\n");
1872  rc = SCSI_MLQUEUE_HOST_BUSY;
1873  goto exit_qcmd;
1874  }
1875 exit_qcmd:
1876  return rc;
1877 }
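/*
 * Return value convention: 0 means the command was accepted (or completed via
 * scsi_done() for the chkready case); SCSI_MLQUEUE_HOST_BUSY and
 * SCSI_MLQUEUE_TARGET_BUSY ask the SCSI midlayer to retry the command later.
 */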
1878 
1879 void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1880  struct fcoe_task_ctx_entry *task,
1881  u8 num_rq)
1882 {
1883  struct fcoe_fcp_rsp_payload *fcp_rsp;
1884  struct bnx2fc_rport *tgt = io_req->tgt;
1885  struct scsi_cmnd *sc_cmd;
1886  struct Scsi_Host *host;
1887 
1888 
1889  /* scsi_cmd_cmpl is called with tgt lock held */
1890 
1891  if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
1892  /* we will not receive ABTS response for this IO */
1893  BNX2FC_IO_DBG(io_req, "Timer context finished processing "
1894  "this scsi cmd\n");
1895  }
1896 
1897  /* Cancel the timeout_work, as we received IO completion */
1898  if (cancel_delayed_work(&io_req->timeout_work))
1899  kref_put(&io_req->refcount,
1900  bnx2fc_cmd_release); /* drop timer hold */
1901 
1902  sc_cmd = io_req->sc_cmd;
1903  if (sc_cmd == NULL) {
1904  printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
1905  return;
1906  }
1907 
1908  /* Fetch fcp_rsp from task context and perform cmd completion */
1909  fcp_rsp = (struct fcoe_fcp_rsp_payload *)
1910  &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
1911 
1912  /* parse fcp_rsp and obtain sense data from RQ if available */
1913  bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
1914 
1915  host = sc_cmd->device->host;
1916  if (!sc_cmd->SCp.ptr) {
1917  printk(KERN_ERR PFX "SCp.ptr is NULL\n");
1918  return;
1919  }
1920 
1921  if (io_req->on_active_queue) {
1922  list_del_init(&io_req->link);
1923  io_req->on_active_queue = 0;
1924  /* Move IO req to retire queue */
1925  list_add_tail(&io_req->link, &tgt->io_retire_queue);
1926  } else {
1927  /* This should not happen, but could have been pulled
1928  * by bnx2fc_flush_active_ios(), or during a race
1929  * between command abort and (late) completion.
1930  */
1931  BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
1932  if (io_req->wait_for_comp)
1933  if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1934  &io_req->req_flags))
1935  complete(&io_req->tm_done);
1936  }
1937 
1938  bnx2fc_unmap_sg_list(io_req);
1939  io_req->sc_cmd = NULL;
1940 
1941  switch (io_req->fcp_status) {
1942  case FC_GOOD:
1943  if (io_req->cdb_status == 0) {
1944  /* Good IO completion */
1945  sc_cmd->result = DID_OK << 16;
1946  } else {
1947  /* Transport status is good, SCSI status not good */
1948  BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
1949  " fcp_resid = 0x%x\n",
1950  io_req->cdb_status, io_req->fcp_resid);
1951  sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1952  }
1953  if (io_req->fcp_resid)
1954  scsi_set_resid(sc_cmd, io_req->fcp_resid);
1955  break;
1956  default:
1957  printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
1958  io_req->fcp_status);
1959  break;
1960  }
1961  sc_cmd->SCp.ptr = NULL;
1962  sc_cmd->scsi_done(sc_cmd);
1963  kref_put(&io_req->refcount, bnx2fc_cmd_release);
1964 }
1965 
1966 int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
1967  struct bnx2fc_cmd *io_req)
1968 {
1969  struct fcoe_task_ctx_entry *task;
1970  struct fcoe_task_ctx_entry *task_page;
1971  struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1972  struct fcoe_port *port = tgt->port;
1973  struct bnx2fc_interface *interface = port->priv;
1974  struct bnx2fc_hba *hba = interface->hba;
1975  struct fc_lport *lport = port->lport;
1976  struct fc_stats *stats;
1977  int task_idx, index;
1978  u16 xid;
1979 
1980  /* Initialize rest of io_req fields */
1981  io_req->cmd_type = BNX2FC_SCSI_CMD;
1982  io_req->port = port;
1983  io_req->tgt = tgt;
1984  io_req->data_xfer_len = scsi_bufflen(sc_cmd);
1985  sc_cmd->SCp.ptr = (char *)io_req;
1986 
1987  stats = per_cpu_ptr(lport->stats, get_cpu());
1988  if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1989  io_req->io_req_flags = BNX2FC_READ;
1990  stats->InputRequests++;
1991  stats->InputBytes += io_req->data_xfer_len;
1992  } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
1993  io_req->io_req_flags = BNX2FC_WRITE;
1994  stats->OutputRequests++;
1995  stats->OutputBytes += io_req->data_xfer_len;
1996  } else {
1997  io_req->io_req_flags = 0;
1998  stats->ControlRequests++;
1999  }
2000  put_cpu();
2001 
2002  xid = io_req->xid;
2003 
2004  /* Build buffer descriptor list for firmware from sg list */
2005  if (bnx2fc_build_bd_list_from_sg(io_req)) {
2006  printk(KERN_ERR PFX "BD list creation failed\n");
2007  spin_lock_bh(&tgt->tgt_lock);
2008  kref_put(&io_req->refcount, bnx2fc_cmd_release);
2009  spin_unlock_bh(&tgt->tgt_lock);
2010  return -EAGAIN;
2011  }
2012 
2013  task_idx = xid / BNX2FC_TASKS_PER_PAGE;
2014  index = xid % BNX2FC_TASKS_PER_PAGE;
2015 
2016  /* Initialize task context for this IO request */
2017  task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
2018  task = &(task_page[index]);
2019  bnx2fc_init_task(io_req, task);
2020 
2021  spin_lock_bh(&tgt->tgt_lock);
2022 
2023  if (tgt->flush_in_prog) {
2024  printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
2025  kref_put(&io_req->refcount, bnx2fc_cmd_release);
2026  spin_unlock_bh(&tgt->tgt_lock);
2027  return -EAGAIN;
2028  }
2029 
2030  if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
2031  printk(KERN_ERR PFX "Session not ready...post_io\n");
2032  kref_put(&io_req->refcount, bnx2fc_cmd_release);
2033  spin_unlock_bh(&tgt->tgt_lock);
2034  return -EAGAIN;
2035  }
2036 
2037  /* Time IO req */
2038  if (tgt->io_timeout)
2039  bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
2040  /* Obtain free SQ entry */
2041  bnx2fc_add_2_sq(tgt, xid);
2042 
2043  /* Enqueue the io_req to active_cmd_queue */
2044 
2045  io_req->on_active_queue = 1;
2046  /* move io_req from pending_queue to active_queue */
2047  list_add_tail(&io_req->link, &tgt->active_cmd_queue);
2048 
2049  /* Ring doorbell */
2050  bnx2fc_ring_doorbell(tgt);
2051  spin_unlock_bh(&tgt->tgt_lock);
2052  return 0;
2053 }