qla_target.c (Linux kernel 3.7.1)
1 /*
2  * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
3  *
4  * based on qla2x00t.c code:
5  *
6  * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <[email protected]>
7  * Copyright (C) 2004 - 2005 Leonid Stoljar
8  * Copyright (C) 2006 Nathaniel Clark <[email protected]>
9  * Copyright (C) 2006 - 2010 ID7 Ltd.
10  *
11  * Forward port and refactoring to modern qla2xxx and target/configfs
12  *
13  * Copyright (C) 2010-2011 Nicholas A. Bellinger <[email protected]>
14  *
15  * This program is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU General Public License
17  * as published by the Free Software Foundation, version 2
18  * of the License.
19  *
20  * This program is distributed in the hope that it will be useful,
21  * but WITHOUT ANY WARRANTY; without even the implied warranty of
22  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23  * GNU General Public License for more details.
24  */
25 
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
29 #include <linux/blkdev.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/delay.h>
33 #include <linux/list.h>
34 #include <linux/workqueue.h>
35 #include <asm/unaligned.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_host.h>
38 #include <scsi/scsi_tcq.h>
39 #include <target/target_core_base.h>
40 #include <target/target_core_fabric.h>
41 
42 #include "qla_def.h"
43 #include "qla_target.h"
44 
45 static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
46 module_param(qlini_mode, charp, S_IRUGO);
47 MODULE_PARM_DESC(qlini_mode,
48  "Determines when initiator mode will be enabled. Possible values: "
49  "\"exclusive\" - initiator mode will be enabled on load, "
50  "disabled on enabling target mode and then on disabling target mode "
51  "enabled back; "
52  "\"disabled\" - initiator mode will never be enabled; "
53  "\"enabled\" (default) - initiator mode will always stay enabled.");
54 
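/*
 * Editor's usage note (not part of the original file): qlini_mode is a
 * read-only (S_IRUGO) module parameter, so the mode is chosen at module
 * load time, e.g.:
 *
 *	modprobe qla2xxx qlini_mode=disabled
 *
 * The active value can be read back from
 * /sys/module/qla2xxx/parameters/qlini_mode but not changed at runtime.
 */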
55 static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
56 
57 /*
58  * From scsi/fc/fc_fcp.h
59  */
60 enum fcp_resp_rsp_codes {
61  FCP_TMF_CMPL = 0,
62  FCP_DATA_LEN_INVALID = 1,
63  FCP_CMND_FIELDS_INVALID = 2,
64  FCP_DATA_PARAM_MISMATCH = 3,
65  FCP_TMF_REJECTED = 4,
66  FCP_TMF_FAILED = 5,
67  FCP_TMF_INVALID_LUN = 9,
68 };
69 
70 /*
71  * fc_pri_ta from scsi/fc/fc_fcp.h
72  */
73 #define FCP_PTA_SIMPLE 0 /* simple task attribute */
74 #define FCP_PTA_HEADQ 1 /* head of queue task attribute */
75 #define FCP_PTA_ORDERED 2 /* ordered task attribute */
76 #define FCP_PTA_ACA 4 /* auto. contingent allegiance */
77 #define FCP_PTA_MASK 7 /* mask for task attribute field */
78 #define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
79 #define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
80 
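/*
 * Editor's sketch (not part of the original file): how the FCP_PTA_* and
 * FCP_PRI_* masks above are typically applied to the task-attribute byte
 * carried in FCP_CMND. The helper names and their argument are
 * hypothetical, for illustration only.
 */
static inline int qlt_example_task_attr(uint8_t task_attr_byte)
{
	/* Low three bits select SIMPLE/HEADQ/ORDERED/ACA */
	return task_attr_byte & FCP_PTA_MASK;
}

static inline int qlt_example_task_prio(uint8_t task_attr_byte)
{
	/* Priority lives above bit 3; bit 7 is reserved */
	return (task_attr_byte & ~FCP_PRI_RESVD_MASK) >> FCP_PRI_SHIFT;
}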
81 /*
82  * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
83  * must be called under HW lock and could unlock/lock it inside.
84  * It isn't an issue, since in the current implementation at the time
85  * those functions are called:
86  *
87  * - Either context is IRQ and only IRQ handler can modify HW data,
88  * including rings related fields,
89  *
90  * - Or access to target mode variables from struct qla_tgt doesn't
91  * cross those functions' boundaries, except tgt_stop, which is
92  * additionally protected by irq_cmd_count.
93  */
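/*
 * Editor's illustration (not part of the original file): the caller
 * pattern implied by the comment above -- the helpers may transiently
 * drop and re-take ha->hardware_lock, so the caller only assumes it
 * holds the lock again after they return:
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	if (qlt_issue_marker(vha, 1) == QLA_SUCCESS)   // may unlock/relock
 *		pkt = qla2x00_alloc_iocbs(vha, NULL);  // ditto
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */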
94 /* Predefs for callbacks handed to qla2xxx LLD */
95 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
96  struct atio_from_isp *pkt);
97 static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
98 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
99  int fn, void *iocb, int flags);
100 static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
101  *cmd, struct atio_from_isp *atio, int ha_locked);
102 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
103  struct qla_tgt_srr_imm *imm, int ha_lock);
104 /*
105  * Global Variables
106  */
107 static struct kmem_cache *qla_tgt_cmd_cachep;
108 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
109 static mempool_t *qla_tgt_mgmt_cmd_mempool;
110 static struct workqueue_struct *qla_tgt_wq;
111 static DEFINE_MUTEX(qla_tgt_mutex);
112 static LIST_HEAD(qla_tgt_glist);
113 
114 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
115 static struct qla_tgt_sess *qlt_find_sess_by_port_name(
116  struct qla_tgt *tgt,
117  const uint8_t *port_name)
118 {
119  struct qla_tgt_sess *sess;
120 
121  list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
122  if (!memcmp(sess->port_name, port_name, WWN_SIZE))
123  return sess;
124  }
125 
126  return NULL;
127 }
128 
129 /* Might release hw lock, then reacquire!! */
130 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
131 {
132  /* Send marker if required */
133  if (unlikely(vha->marker_needed != 0)) {
134  int rc = qla2x00_issue_marker(vha, vha_locked);
135  if (rc != QLA_SUCCESS) {
136  ql_dbg(ql_dbg_tgt, vha, 0xe03d,
137  "qla_target(%d): issue_marker() failed\n",
138  vha->vp_idx);
139  }
140  return rc;
141  }
142  return QLA_SUCCESS;
143 }
144 
145 static inline
146 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
147  uint8_t *d_id)
148 {
149  struct qla_hw_data *ha = vha->hw;
150  uint8_t vp_idx;
151 
152  if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
153  return NULL;
154 
155  if (vha->d_id.b.al_pa == d_id[2])
156  return vha;
157 
158  BUG_ON(ha->tgt.tgt_vp_map == NULL);
159  vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
160  if (likely(test_bit(vp_idx, ha->vp_idx_map)))
161  return ha->tgt.tgt_vp_map[vp_idx].vha;
162 
163  return NULL;
164 }
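/*
 * Editor's note (added): d_id[] above is the 24-bit FC port ID in wire
 * (big-endian) order -- d_id[0] = domain, d_id[1] = area, d_id[2] = AL_PA --
 * which is why it is compared field-by-field against vha->d_id.b, and why
 * the AL_PA byte indexes tgt_vp_map[] for the NPIV vport lookup.
 */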
165 
166 static inline
167 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
168  uint16_t vp_idx)
169 {
170  struct qla_hw_data *ha = vha->hw;
171 
172  if (vha->vp_idx == vp_idx)
173  return vha;
174 
175  BUG_ON(ha->tgt.tgt_vp_map == NULL);
176  if (likely(test_bit(vp_idx, ha->vp_idx_map)))
177  return ha->tgt.tgt_vp_map[vp_idx].vha;
178 
179  return NULL;
180 }
181 
182 void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
183  struct atio_from_isp *atio)
184 {
185  switch (atio->u.raw.entry_type) {
186  case ATIO_TYPE7:
187  {
188  struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
189  atio->u.isp24.fcp_hdr.d_id);
190  if (unlikely(NULL == host)) {
191  ql_dbg(ql_dbg_tgt, vha, 0xe03e,
192  "qla_target(%d): Received ATIO_TYPE7 "
193  "with unknown d_id %x:%x:%x\n", vha->vp_idx,
194  atio->u.isp24.fcp_hdr.d_id[0],
195  atio->u.isp24.fcp_hdr.d_id[1],
196  atio->u.isp24.fcp_hdr.d_id[2]);
197  break;
198  }
199  qlt_24xx_atio_pkt(host, atio);
200  break;
201  }
202 
203  case IMMED_NOTIFY_TYPE:
204  {
205  struct scsi_qla_host *host = vha;
206  struct imm_ntfy_from_isp *entry =
207  (struct imm_ntfy_from_isp *)atio;
208 
209  if ((entry->u.isp24.vp_index != 0xFF) &&
210  (entry->u.isp24.nport_handle != 0xFFFF)) {
211  host = qlt_find_host_by_vp_idx(vha,
212  entry->u.isp24.vp_index);
213  if (unlikely(!host)) {
214  ql_dbg(ql_dbg_tgt, vha, 0xe03f,
215  "qla_target(%d): Received "
216  "ATIO (IMMED_NOTIFY_TYPE) "
217  "with unknown vp_index %d\n",
218  vha->vp_idx, entry->u.isp24.vp_index);
219  break;
220  }
221  }
222  qlt_24xx_atio_pkt(host, atio);
223  break;
224  }
225 
226  default:
227  ql_dbg(ql_dbg_tgt, vha, 0xe040,
228  "qla_target(%d): Received unknown ATIO atio "
229  "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
230  break;
231  }
232 
233  return;
234 }
235 
236 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
237 {
238  switch (pkt->entry_type) {
239  case CTIO_TYPE7:
240  {
241  struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
242  struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
243  entry->vp_index);
244  if (unlikely(!host)) {
245  ql_dbg(ql_dbg_tgt, vha, 0xe041,
246  "qla_target(%d): Response pkt (CTIO_TYPE7) "
247  "received, with unknown vp_index %d\n",
248  vha->vp_idx, entry->vp_index);
249  break;
250  }
251  qlt_response_pkt(host, pkt);
252  break;
253  }
254 
255  case IMMED_NOTIFY_TYPE:
256  {
257  struct scsi_qla_host *host = vha;
258  struct imm_ntfy_from_isp *entry =
259  (struct imm_ntfy_from_isp *)pkt;
260 
261  host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
262  if (unlikely(!host)) {
263  ql_dbg(ql_dbg_tgt, vha, 0xe042,
264  "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
265  "received, with unknown vp_index %d\n",
266  vha->vp_idx, entry->u.isp24.vp_index);
267  break;
268  }
269  qlt_response_pkt(host, pkt);
270  break;
271  }
272 
273  case NOTIFY_ACK_TYPE:
274  {
275  struct scsi_qla_host *host = vha;
276  struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
277 
278  if (0xFF != entry->u.isp24.vp_index) {
279  host = qlt_find_host_by_vp_idx(vha,
280  entry->u.isp24.vp_index);
281  if (unlikely(!host)) {
282  ql_dbg(ql_dbg_tgt, vha, 0xe043,
283  "qla_target(%d): Response "
284  "pkt (NOTIFY_ACK_TYPE) "
285  "received, with unknown "
286  "vp_index %d\n", vha->vp_idx,
287  entry->u.isp24.vp_index);
288  break;
289  }
290  }
291  qlt_response_pkt(host, pkt);
292  break;
293  }
294 
295  case ABTS_RECV_24XX:
296  {
297  struct abts_recv_from_24xx *entry =
298  (struct abts_recv_from_24xx *)pkt;
299  struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
300  entry->vp_index);
301  if (unlikely(!host)) {
302  ql_dbg(ql_dbg_tgt, vha, 0xe044,
303  "qla_target(%d): Response pkt "
304  "(ABTS_RECV_24XX) received, with unknown "
305  "vp_index %d\n", vha->vp_idx, entry->vp_index);
306  break;
307  }
308  qlt_response_pkt(host, pkt);
309  break;
310  }
311 
312  case ABTS_RESP_24XX:
313  {
314  struct abts_resp_to_24xx *entry =
315  (struct abts_resp_to_24xx *)pkt;
316  struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
317  entry->vp_index);
318  if (unlikely(!host)) {
319  ql_dbg(ql_dbg_tgt, vha, 0xe045,
320  "qla_target(%d): Response pkt "
321  "(ABTS_RECV_24XX) received, with unknown "
322  "vp_index %d\n", vha->vp_idx, entry->vp_index);
323  break;
324  }
325  qlt_response_pkt(host, pkt);
326  break;
327  }
328 
329  default:
330  qlt_response_pkt(vha, pkt);
331  break;
332  }
333 
334 }
335 
336 static void qlt_free_session_done(struct work_struct *work)
337 {
338  struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
339  free_work);
340  struct qla_tgt *tgt = sess->tgt;
341  struct scsi_qla_host *vha = sess->vha;
342  struct qla_hw_data *ha = vha->hw;
343 
344  BUG_ON(!tgt);
345  /*
346  * Release the target session for FC Nexus from fabric module code.
347  */
348  if (sess->se_sess != NULL)
349  ha->tgt.tgt_ops->free_session(sess);
350 
351  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
352  "Unregistration of sess %p finished\n", sess);
353 
354  kfree(sess);
355  /*
356  * We need to protect against a race where tgt is freed before or
357  * inside wake_up()
358  */
359  tgt->sess_count--;
360  if (tgt->sess_count == 0)
361  wake_up_all(&tgt->waitQ);
362 }
363 
364 /* ha->hardware_lock supposed to be held on entry */
365 void qlt_unreg_sess(struct qla_tgt_sess *sess)
366 {
367  struct scsi_qla_host *vha = sess->vha;
368 
369  vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
370 
371  list_del(&sess->sess_list_entry);
372  if (sess->deleted)
373  list_del(&sess->del_list_entry);
374 
375  INIT_WORK(&sess->free_work, qlt_free_session_done);
376  schedule_work(&sess->free_work);
377 }
378 EXPORT_SYMBOL(qlt_unreg_sess);
379 
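/*
 * Editor's note (added): qlt_unreg_sess() runs with ha->hardware_lock
 * held, so the actual teardown is deferred to process context via
 * schedule_work(); qlt_free_session_done() then calls ->free_session(),
 * which may sleep. The "may sleep" rationale is the editor's reading,
 * not stated in the original source.
 */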
380 /* ha->hardware_lock supposed to be held on entry */
381 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
382 {
383  struct qla_hw_data *ha = vha->hw;
384  struct qla_tgt_sess *sess = NULL;
385  uint32_t unpacked_lun, lun = 0;
386  uint16_t loop_id;
387  int res = 0;
388  struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
389  struct atio_from_isp *a = (struct atio_from_isp *)iocb;
390 
391  loop_id = le16_to_cpu(n->u.isp24.nport_handle);
392  if (loop_id == 0xFFFF) {
393 #if 0 /* FIXME: Re-enable Global event handling.. */
394  /* Global event */
395  atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
396  qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
397  if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
398  sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
399  typeof(*sess), sess_list_entry);
400  switch (mcmd) {
401  case QLA_TGT_NEXUS_LOSS_SESS:
402  mcmd = QLA_TGT_NEXUS_LOSS;
403  break;
404  case QLA_TGT_ABORT_ALL_SESS:
405  mcmd = QLA_TGT_ABORT_ALL;
406  break;
407  case QLA_TGT_NEXUS_LOSS:
408  case QLA_TGT_ABORT_ALL:
409  break;
410  default:
411  ql_dbg(ql_dbg_tgt, vha, 0xe046,
412  "qla_target(%d): Not allowed "
413  "command %x in %s", vha->vp_idx,
414  mcmd, __func__);
415  sess = NULL;
416  break;
417  }
418  } else
419  sess = NULL;
420 #endif
421  } else {
422  sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
423  }
424 
425  ql_dbg(ql_dbg_tgt, vha, 0xe000,
426  "Using sess for qla_tgt_reset: %p\n", sess);
427  if (!sess) {
428  res = -ESRCH;
429  return res;
430  }
431 
432  ql_dbg(ql_dbg_tgt, vha, 0xe047,
433  "scsi(%ld): resetting (session %p from port "
434  "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
435  "mcmd %x, loop_id %d)\n", vha->host_no, sess,
436  sess->port_name[0], sess->port_name[1],
437  sess->port_name[2], sess->port_name[3],
438  sess->port_name[4], sess->port_name[5],
439  sess->port_name[6], sess->port_name[7],
440  mcmd, loop_id);
441 
442  lun = a->u.isp24.fcp_cmnd.lun;
443  unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
444 
445  return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
446  iocb, QLA24XX_MGMT_SEND_NACK);
447 }
448 
449 /* ha->hardware_lock supposed to be held on entry */
450 static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
451  bool immediate)
452 {
453  struct qla_tgt *tgt = sess->tgt;
454  uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
455 
456  if (sess->deleted)
457  return;
458 
459  ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
460  "Scheduling sess %p for deletion\n", sess);
461  list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
462  sess->deleted = 1;
463 
464  if (immediate)
465  dev_loss_tmo = 0;
466 
467  sess->expires = jiffies + dev_loss_tmo * HZ;
468 
469  ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
470  "qla_target(%d): session for port %02x:%02x:%02x:"
471  "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
472  "deletion in %u secs (expires: %lu) immed: %d\n",
473  sess->vha->vp_idx,
474  sess->port_name[0], sess->port_name[1],
475  sess->port_name[2], sess->port_name[3],
476  sess->port_name[4], sess->port_name[5],
477  sess->port_name[6], sess->port_name[7],
478  sess->loop_id, dev_loss_tmo, sess->expires, immediate);
479 
480  if (immediate)
481  schedule_delayed_work(&tgt->sess_del_work, 0);
482  else
483  schedule_delayed_work(&tgt->sess_del_work,
484  jiffies - sess->expires);
485 }
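/*
 * Editor's worked example (added): with the dev_loss_tmo formula above,
 * an illustrative port_down_retry_count of 30 gives dev_loss_tmo = 35,
 * so a non-immediate deletion sets sess->expires = jiffies + 35 * HZ and
 * the session lingers roughly 35 seconds before qlt_del_sess_work_fn()
 * reaps it; immediate deletion schedules the work with zero delay.
 */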
486 
487 /* ha->hardware_lock supposed to be held on entry */
488 static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
489 {
490  struct qla_tgt_sess *sess;
491 
492  list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
493  qlt_schedule_sess_for_deletion(sess, true);
494 
495  /* At this point tgt could be already dead */
496 }
497 
498 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
499  uint16_t *loop_id)
500 {
501  struct qla_hw_data *ha = vha->hw;
502  dma_addr_t gid_list_dma;
503  struct gid_list_info *gid_list;
504  char *id_iter;
505  int res, rc, i;
506  uint16_t entries;
507 
508  gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
509  &gid_list_dma, GFP_KERNEL);
510  if (!gid_list) {
511  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
512  "qla_target(%d): DMA Alloc failed of %u\n",
513  vha->vp_idx, qla2x00_gid_list_size(ha));
514  return -ENOMEM;
515  }
516 
517  /* Get list of logged in devices */
518  rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
519  if (rc != QLA_SUCCESS) {
520  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
521  "qla_target(%d): get_id_list() failed: %x\n",
522  vha->vp_idx, rc);
523  res = -1;
524  goto out_free_id_list;
525  }
526 
527  id_iter = (char *)gid_list;
528  res = -1;
529  for (i = 0; i < entries; i++) {
530  struct gid_list_info *gid = (struct gid_list_info *)id_iter;
531  if ((gid->al_pa == s_id[2]) &&
532  (gid->area == s_id[1]) &&
533  (gid->domain == s_id[0])) {
534  *loop_id = le16_to_cpu(gid->loop_id);
535  res = 0;
536  break;
537  }
538  id_iter += ha->gid_list_info_size;
539  }
540 
541 out_free_id_list:
542  dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
543  gid_list, gid_list_dma);
544  return res;
545 }
546 
547 static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
548  struct qla_tgt_sess *sess)
549 {
550  struct qla_hw_data *ha = vha->hw;
551  struct qla_port_24xx_data *pmap24;
552  bool res, found = false;
553  int rc, i;
554  uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
555  uint16_t entries;
556  void *pmap;
557  int pmap_len;
558  fc_port_t *fcport;
559  int global_resets;
560  unsigned long flags;
561 
562 retry:
563  global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
564 
565  rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
566  if (rc != QLA_SUCCESS) {
567  res = false;
568  goto out;
569  }
570 
571  pmap24 = pmap;
572  entries = pmap_len/sizeof(*pmap24);
573 
574  for (i = 0; i < entries; ++i) {
575  if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
576  loop_id = le16_to_cpu(pmap24[i].loop_id);
577  found = true;
578  break;
579  }
580  }
581 
582  kfree(pmap);
583 
584  if (!found) {
585  res = false;
586  goto out;
587  }
588 
589  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
590  "qlt_check_fcport_exist(): loop_id %d", loop_id);
591 
592  fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
593  if (fcport == NULL) {
594  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
595  "qla_target(%d): Allocation of tmp FC port failed",
596  vha->vp_idx);
597  res = false;
598  goto out;
599  }
600 
601  fcport->loop_id = loop_id;
602 
603  rc = qla2x00_get_port_database(vha, fcport, 0);
604  if (rc != QLA_SUCCESS) {
605  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
606  "qla_target(%d): Failed to retrieve fcport "
607  "information -- get_port_database() returned %x "
608  "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
609  res = false;
610  goto out_free_fcport;
611  }
612 
613  if (global_resets !=
614  atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
615  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
616  "qla_target(%d): global reset during session discovery"
617  " (counter was %d, new %d), retrying",
618  vha->vp_idx, global_resets,
619  atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
620  goto retry;
621  }
622 
623  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
624  "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
625  "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
626  sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
627  fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
628 
629  spin_lock_irqsave(&ha->hardware_lock, flags);
630  ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
631  (fcport->flags & FCF_CONF_COMP_SUPPORTED));
632  spin_unlock_irqrestore(&ha->hardware_lock, flags);
633 
634  res = true;
635 
636 out_free_fcport:
637  kfree(fcport);
638 
639 out:
640  return res;
641 }
642 
643 /* ha->hardware_lock supposed to be held on entry */
644 static void qlt_undelete_sess(struct qla_tgt_sess *sess)
645 {
646  BUG_ON(!sess->deleted);
647 
648  list_del(&sess->del_list_entry);
649  sess->deleted = 0;
650 }
651 
652 static void qlt_del_sess_work_fn(struct delayed_work *work)
653 {
654  struct qla_tgt *tgt = container_of(work, struct qla_tgt,
655  sess_del_work);
656  struct scsi_qla_host *vha = tgt->vha;
657  struct qla_hw_data *ha = vha->hw;
658  struct qla_tgt_sess *sess;
659  unsigned long flags;
660 
661  spin_lock_irqsave(&ha->hardware_lock, flags);
662  while (!list_empty(&tgt->del_sess_list)) {
663  sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
664  del_list_entry);
665  if (time_after_eq(jiffies, sess->expires)) {
666  bool cancel;
667 
668  qlt_undelete_sess(sess);
669 
670  spin_unlock_irqrestore(&ha->hardware_lock, flags);
671  cancel = qlt_check_fcport_exist(vha, sess);
672 
673  if (cancel) {
674  if (sess->deleted) {
675  /*
676  * sess was again deleted while we were
677  * discovering it
678  */
679  spin_lock_irqsave(&ha->hardware_lock,
680  flags);
681  continue;
682  }
683 
684  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
685  "qla_target(%d): cancel deletion of "
686  "session for port %02x:%02x:%02x:%02x:%02x:"
687  "%02x:%02x:%02x (loop ID %d), because "
688  " it isn't deleted by firmware",
689  vha->vp_idx, sess->port_name[0],
690  sess->port_name[1], sess->port_name[2],
691  sess->port_name[3], sess->port_name[4],
692  sess->port_name[5], sess->port_name[6],
693  sess->port_name[7], sess->loop_id);
694  } else {
695  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
696  "Timeout: sess %p about to be deleted\n",
697  sess);
698  ha->tgt.tgt_ops->shutdown_sess(sess);
699  ha->tgt.tgt_ops->put_sess(sess);
700  }
701 
702  spin_lock_irqsave(&ha->hardware_lock, flags);
703  } else {
704  schedule_delayed_work(&tgt->sess_del_work,
705  jiffies - sess->expires);
706  break;
707  }
708  }
709  spin_unlock_irqrestore(&ha->hardware_lock, flags);
710 }
711 
712 /*
713  * Adds an extra ref to allow dropping the hw lock after adding sess to the list.
714  * Caller must put it.
715  */
716 static struct qla_tgt_sess *qlt_create_sess(
717  struct scsi_qla_host *vha,
718  fc_port_t *fcport,
719  bool local)
720 {
721  struct qla_hw_data *ha = vha->hw;
722  struct qla_tgt_sess *sess;
723  unsigned long flags;
724  unsigned char be_sid[3];
725 
726  /* Check to avoid double sessions */
727  spin_lock_irqsave(&ha->hardware_lock, flags);
728  list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
729  sess_list_entry) {
730  if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
731  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
732  "Double sess %p found (s_id %x:%x:%x, "
733  "loop_id %d), updating to d_id %x:%x:%x, "
734  "loop_id %d", sess, sess->s_id.b.domain,
735  sess->s_id.b.al_pa, sess->s_id.b.area,
736  sess->loop_id, fcport->d_id.b.domain,
737  fcport->d_id.b.al_pa, fcport->d_id.b.area,
738  fcport->loop_id);
739 
740  if (sess->deleted)
741  qlt_undelete_sess(sess);
742 
743  kref_get(&sess->se_sess->sess_kref);
744  ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
745  (fcport->flags & FCF_CONF_COMP_SUPPORTED));
746 
747  if (sess->local && !local)
748  sess->local = 0;
749  spin_unlock_irqrestore(&ha->hardware_lock, flags);
750 
751  return sess;
752  }
753  }
754  spin_unlock_irqrestore(&ha->hardware_lock, flags);
755 
756  sess = kzalloc(sizeof(*sess), GFP_KERNEL);
757  if (!sess) {
758  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
759  "qla_target(%u): session allocation failed, "
760  "all commands from port %02x:%02x:%02x:%02x:"
761  "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
762  fcport->port_name[0], fcport->port_name[1],
763  fcport->port_name[2], fcport->port_name[3],
764  fcport->port_name[4], fcport->port_name[5],
765  fcport->port_name[6], fcport->port_name[7]);
766 
767  return NULL;
768  }
769  sess->tgt = ha->tgt.qla_tgt;
770  sess->vha = vha;
771  sess->s_id = fcport->d_id;
772  sess->loop_id = fcport->loop_id;
773  sess->local = local;
774 
775  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
776  "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
777  sess, ha->tgt.qla_tgt);
778 
779  be_sid[0] = sess->s_id.b.domain;
780  be_sid[1] = sess->s_id.b.area;
781  be_sid[2] = sess->s_id.b.al_pa;
782  /*
783  * Determine if this fc_port->port_name is allowed to access
784  * target mode using explicit NodeACLs+MappedLUNs, or using
785  * TPG demo mode. If this is successful a target mode FC nexus
786  * is created.
787  */
788  if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
789  &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
790  kfree(sess);
791  return NULL;
792  }
793  /*
794  * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
795  * access across ->hardware_lock reacquire.
796  */
797  kref_get(&sess->se_sess->sess_kref);
798 
799  sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
800  BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
801  memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
802 
803  spin_lock_irqsave(&ha->hardware_lock, flags);
804  list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
805  ha->tgt.qla_tgt->sess_count++;
806  spin_unlock_irqrestore(&ha->hardware_lock, flags);
807 
808  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
809  "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
810  "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
811  " completion %ssupported) added\n",
812  vha->vp_idx, local ? "local " : "", fcport->port_name[0],
813  fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
814  fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
815  fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
816  sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
817  "" : "not ");
818 
819  return sess;
820 }
821 
822 /*
823  * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
824  */
825 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
826 {
827  struct qla_hw_data *ha = vha->hw;
828  struct qla_tgt *tgt = ha->tgt.qla_tgt;
829  struct qla_tgt_sess *sess;
830  unsigned long flags;
831 
832  if (!vha->hw->tgt.tgt_ops)
833  return;
834 
835  if (!tgt || (fcport->port_type != FCT_INITIATOR))
836  return;
837 
838  spin_lock_irqsave(&ha->hardware_lock, flags);
839  if (tgt->tgt_stop) {
840  spin_unlock_irqrestore(&ha->hardware_lock, flags);
841  return;
842  }
843  sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
844  if (!sess) {
845  spin_unlock_irqrestore(&ha->hardware_lock, flags);
846 
847  mutex_lock(&ha->tgt.tgt_mutex);
848  sess = qlt_create_sess(vha, fcport, false);
849  mutex_unlock(&ha->tgt.tgt_mutex);
850 
851  spin_lock_irqsave(&ha->hardware_lock, flags);
852  } else {
853  kref_get(&sess->se_sess->sess_kref);
854 
855  if (sess->deleted) {
856  qlt_undelete_sess(sess);
857 
858  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
859  "qla_target(%u): %ssession for port %02x:"
860  "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
861  "reappeared\n", vha->vp_idx, sess->local ? "local "
862  : "", sess->port_name[0], sess->port_name[1],
863  sess->port_name[2], sess->port_name[3],
864  sess->port_name[4], sess->port_name[5],
865  sess->port_name[6], sess->port_name[7],
866  sess->loop_id);
867 
868  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
869  "Reappeared sess %p\n", sess);
870  }
871  ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
872  (fcport->flags & FCF_CONF_COMP_SUPPORTED));
873  }
874 
875  if (sess && sess->local) {
876  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
877  "qla_target(%u): local session for "
878  "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
879  "(loop ID %d) became global\n", vha->vp_idx,
880  fcport->port_name[0], fcport->port_name[1],
881  fcport->port_name[2], fcport->port_name[3],
882  fcport->port_name[4], fcport->port_name[5],
883  fcport->port_name[6], fcport->port_name[7],
884  sess->loop_id);
885  sess->local = 0;
886  }
887  spin_unlock_irqrestore(&ha->hardware_lock, flags);
888 
889  ha->tgt.tgt_ops->put_sess(sess);
890 }
891 
892 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
893 {
894  struct qla_hw_data *ha = vha->hw;
895  struct qla_tgt *tgt = ha->tgt.qla_tgt;
896  struct qla_tgt_sess *sess;
897  unsigned long flags;
898 
899  if (!vha->hw->tgt.tgt_ops)
900  return;
901 
902  if (!tgt || (fcport->port_type != FCT_INITIATOR))
903  return;
904 
905  spin_lock_irqsave(&ha->hardware_lock, flags);
906  if (tgt->tgt_stop) {
907  spin_unlock_irqrestore(&ha->hardware_lock, flags);
908  return;
909  }
910  sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
911  if (!sess) {
912  spin_unlock_irqrestore(&ha->hardware_lock, flags);
913  return;
914  }
915 
916  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
917 
918  sess->local = 1;
919  qlt_schedule_sess_for_deletion(sess, false);
920  spin_unlock_irqrestore(&ha->hardware_lock, flags);
921 }
922 
923 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
924 {
925  struct qla_hw_data *ha = tgt->ha;
926  unsigned long flags;
927  int res;
928  /*
929  * We need to protect against a race where tgt is freed before or
930  * inside wake_up()
931  */
932  spin_lock_irqsave(&ha->hardware_lock, flags);
933  ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
934  "tgt %p, empty(sess_list)=%d sess_count=%d\n",
935  tgt, list_empty(&tgt->sess_list), tgt->sess_count);
936  res = (tgt->sess_count == 0);
937  spin_unlock_irqrestore(&ha->hardware_lock, flags);
938 
939  return res;
940 }
941 
942 /* Called by tcm_qla2xxx configfs code */
943 void qlt_stop_phase1(struct qla_tgt *tgt)
944 {
945  struct scsi_qla_host *vha = tgt->vha;
946  struct qla_hw_data *ha = tgt->ha;
947  unsigned long flags;
948 
949  if (tgt->tgt_stop || tgt->tgt_stopped) {
950  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
951  "Already in tgt->tgt_stop or tgt_stopped state\n");
952  dump_stack();
953  return;
954  }
955 
956  ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
957  vha->host_no, vha);
958  /*
959  * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
960  * Lock is needed, because we can still get an incoming packet.
961  */
962  mutex_lock(&ha->tgt.tgt_mutex);
963  spin_lock_irqsave(&ha->hardware_lock, flags);
964  tgt->tgt_stop = 1;
965  qlt_clear_tgt_db(tgt, true);
966  spin_unlock_irqrestore(&ha->hardware_lock, flags);
967  mutex_unlock(&ha->tgt.tgt_mutex);
968 
969  flush_delayed_work(&tgt->sess_del_work);
970 
971  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
972  "Waiting for sess works (tgt %p)", tgt);
973  spin_lock_irqsave(&tgt->sess_work_lock, flags);
974  while (!list_empty(&tgt->sess_works_list)) {
975  spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
976  flush_scheduled_work();
977  spin_lock_irqsave(&tgt->sess_work_lock, flags);
978  }
979  spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
980 
981  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
982  "Waiting for tgt %p: list_empty(sess_list)=%d "
983  "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
984  tgt->sess_count);
985 
986  wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
987 
988  /* Big hammer */
989  if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
990  qlt_disable_vha(vha);
991 
992  /* Wait for sessions to clear out (just in case) */
993  wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
994 }
995 EXPORT_SYMBOL(qlt_stop_phase1);
996 
997 /* Called by tcm_qla2xxx configfs code */
998 void qlt_stop_phase2(struct qla_tgt *tgt)
999 {
1000  struct qla_hw_data *ha = tgt->ha;
1001  unsigned long flags;
1002 
1003  if (tgt->tgt_stopped) {
1004  ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
1005  "Already in tgt->tgt_stopped state\n");
1006  dump_stack();
1007  return;
1008  }
1009 
1010  ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
1011  "Waiting for %d IRQ commands to complete (tgt %p)",
1012  tgt->irq_cmd_count, tgt);
1013 
1014  mutex_lock(&ha->tgt.tgt_mutex);
1015  spin_lock_irqsave(&ha->hardware_lock, flags);
1016  while (tgt->irq_cmd_count != 0) {
1017  spin_unlock_irqrestore(&ha->hardware_lock, flags);
1018  udelay(2);
1019  spin_lock_irqsave(&ha->hardware_lock, flags);
1020  }
1021  tgt->tgt_stop = 0;
1022  tgt->tgt_stopped = 1;
1023  spin_unlock_irqrestore(&ha->hardware_lock, flags);
1024  mutex_unlock(&ha->tgt.tgt_mutex);
1025 
1026  ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
1027  tgt);
1028 }
1029 EXPORT_SYMBOL(qlt_stop_phase2);
1030 
1031 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
1032 void qlt_release(struct qla_tgt *tgt)
1033 {
1034  struct qla_hw_data *ha = tgt->ha;
1035 
1036  if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
1037  qlt_stop_phase2(tgt);
1038 
1039  ha->tgt.qla_tgt = NULL;
1040 
1041  ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
1042  "Release of tgt %p finished\n", tgt);
1043 
1044  kfree(tgt);
1045 }
1046 
1047 /* ha->hardware_lock supposed to be held on entry */
1048 static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1049  const void *param, unsigned int param_size)
1050 {
1051  struct qla_tgt_sess_work_param *prm;
1052  unsigned long flags;
1053 
1054  prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
1055  if (!prm) {
1056  ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1057  "qla_target(%d): Unable to create session "
1058  "work, command will be refused", 0);
1059  return -ENOMEM;
1060  }
1061 
1062  ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1063  "Scheduling work (type %d, prm %p)"
1064  " to find session for param %p (size %d, tgt %p)\n",
1065  type, prm, param, param_size, tgt);
1066 
1067  prm->type = type;
1068  memcpy(&prm->tm_iocb, param, param_size);
1069 
1070  spin_lock_irqsave(&tgt->sess_work_lock, flags);
1071  list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1072  spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1073 
1074  schedule_work(&tgt->sess_work);
1075 
1076  return 0;
1077 }
1078 
1079 /*
1080  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1081  */
1082 static void qlt_send_notify_ack(struct scsi_qla_host *vha,
1083  struct imm_ntfy_from_isp *ntfy,
1084  uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
1085  uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
1086 {
1087  struct qla_hw_data *ha = vha->hw;
1088  request_t *pkt;
1089  struct nack_to_isp *nack;
1090 
1091  ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1092 
1093  /* Send marker if required */
1094  if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1095  return;
1096 
1097  pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
1098  if (!pkt) {
1099  ql_dbg(ql_dbg_tgt, vha, 0xe049,
1100  "qla_target(%d): %s failed: unable to allocate "
1101  "request packet\n", vha->vp_idx, __func__);
1102  return;
1103  }
1104 
1105  if (ha->tgt.qla_tgt != NULL)
1106  ha->tgt.qla_tgt->notify_ack_expected++;
1107 
1108  pkt->entry_type = NOTIFY_ACK_TYPE;
1109  pkt->entry_count = 1;
1110 
1111  nack = (struct nack_to_isp *)pkt;
1112  nack->ox_id = ntfy->ox_id;
1113 
1114  nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
1115  if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
1116  nack->u.isp24.flags = ntfy->u.isp24.flags &
1117  __constant_cpu_to_le32(NOTIFY24XX_FLAGS_FCSP);
1118  }
1119  nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
1120  nack->u.isp24.status = ntfy->u.isp24.status;
1121  nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
1122  nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
1123  nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
1124  nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
1125  nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
1126  nack->u.isp24.srr_reject_code = srr_reject_code;
1127  nack->u.isp24.srr_reject_code_expl = srr_explan;
1128  nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
1129 
1130  ql_dbg(ql_dbg_tgt, vha, 0xe005,
1131  "qla_target(%d): Sending 24xx Notify Ack %d\n",
1132  vha->vp_idx, nack->u.isp24.status);
1133 
1134  qla2x00_start_iocbs(vha, vha->req);
1135 }
1136 
1137 /*
1138  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1139  */
1140 static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
1141  struct abts_recv_from_24xx *abts, uint32_t status,
1142  bool ids_reversed)
1143 {
1144  struct qla_hw_data *ha = vha->hw;
1145  struct abts_resp_to_24xx *resp;
1146  uint32_t f_ctl;
1147  uint8_t *p;
1148 
1149  ql_dbg(ql_dbg_tgt, vha, 0xe006,
1150  "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1151  ha, abts, status);
1152 
1153  /* Send marker if required */
1154  if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1155  return;
1156 
1157  resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
1158  if (!resp) {
1159  ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1160  "qla_target(%d): %s failed: unable to allocate "
1161  "request packet", vha->vp_idx, __func__);
1162  return;
1163  }
1164 
1165  resp->entry_type = ABTS_RESP_24XX;
1166  resp->entry_count = 1;
1167  resp->nport_handle = abts->nport_handle;
1168  resp->vp_index = vha->vp_idx;
1169  resp->sof_type = abts->sof_type;
1170  resp->exchange_address = abts->exchange_address;
1171  resp->fcp_hdr_le = abts->fcp_hdr_le;
1172  f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1173  F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1174  F_CTL_SEQ_INITIATIVE);
1175  p = (uint8_t *)&f_ctl;
1176  resp->fcp_hdr_le.f_ctl[0] = *p++;
1177  resp->fcp_hdr_le.f_ctl[1] = *p++;
1178  resp->fcp_hdr_le.f_ctl[2] = *p;
1179  if (ids_reversed) {
1180  resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
1181  resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
1182  resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
1183  resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
1184  resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
1185  resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
1186  } else {
1187  resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
1188  resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
1189  resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
1190  resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
1191  resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
1192  resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
1193  }
1194  resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1195  if (status == FCP_TMF_CMPL) {
1196  resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1197  resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1198  resp->payload.ba_acct.low_seq_cnt = 0x0000;
1199  resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1200  resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1201  resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1202  } else {
1203  resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1204  resp->payload.ba_rjt.reason_code =
1205  BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1206  /* Other bytes are zero */
1207  }
1208 
1209  ha->tgt.qla_tgt->abts_resp_expected++;
1210 
1211  qla2x00_start_iocbs(vha, vha->req);
1212 }
1213 
1214 /*
1215  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1216  */
1217 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1218  struct abts_resp_from_24xx_fw *entry)
1219 {
1220  struct ctio7_to_24xx *ctio;
1221 
1222  ql_dbg(ql_dbg_tgt, vha, 0xe007,
1223  "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
1224  /* Send marker if required */
1225  if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1226  return;
1227 
1228  ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
1229  if (ctio == NULL) {
1230  ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1231  "qla_target(%d): %s failed: unable to allocate "
1232  "request packet\n", vha->vp_idx, __func__);
1233  return;
1234  }
1235 
1236  /*
1237  * On entry we have the firmware's response to the ABTS response
1238  * that we generated, so its ID fields are reversed.
1239  */
1240 
1241  ctio->entry_type = CTIO_TYPE7;
1242  ctio->entry_count = 1;
1243  ctio->nport_handle = entry->nport_handle;
1244  ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1245  ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
1246  ctio->vp_index = vha->vp_idx;
1247  ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
1248  ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
1249  ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
1250  ctio->exchange_addr = entry->exchange_addr_to_abort;
1251  ctio->u.status1.flags =
1252  __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1253  CTIO7_FLAGS_TERMINATE);
1254  ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
1255 
1256  qla2x00_start_iocbs(vha, vha->req);
1257 
1258  qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
1259  FCP_TMF_CMPL, true);
1260 }
1261 
1262 /* ha->hardware_lock supposed to be held on entry */
1263 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1264  struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
1265 {
1266  struct qla_hw_data *ha = vha->hw;
1267  struct qla_tgt_mgmt_cmd *mcmd;
1268  int rc;
1269 
1270  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
1271  "qla_target(%d): task abort (tag=%d)\n",
1272  vha->vp_idx, abts->exchange_addr_to_abort);
1273 
1274  mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
1275  if (mcmd == NULL) {
1276  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
1277  "qla_target(%d): %s: Allocation of ABORT cmd failed",
1278  vha->vp_idx, __func__);
1279  return -ENOMEM;
1280  }
1281  memset(mcmd, 0, sizeof(*mcmd));
1282 
1283  mcmd->sess = sess;
1284  memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
1285 
1286  rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
1287  abts->exchange_addr_to_abort);
1288  if (rc != 0) {
1289  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
1290  "qla_target(%d): tgt_ops->handle_tmr()"
1291  " failed: %d", vha->vp_idx, rc);
1292  mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
1293  return -EFAULT;
1294  }
1295 
1296  return 0;
1297 }
1298 
1299 /*
1300  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1301  */
1302 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1303  struct abts_recv_from_24xx *abts)
1304 {
1305  struct qla_hw_data *ha = vha->hw;
1306  struct qla_tgt_sess *sess;
1307  uint32_t tag = abts->exchange_addr_to_abort;
1308  uint8_t s_id[3];
1309  int rc;
1310 
1311  if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
1312  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
1313  "qla_target(%d): ABTS: Abort Sequence not "
1314  "supported\n", vha->vp_idx);
1315  qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1316  return;
1317  }
1318 
1319  if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
1320  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
1321  "qla_target(%d): ABTS: Unknown Exchange "
1322  "Address received\n", vha->vp_idx);
1323  qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1324  return;
1325  }
1326 
1327  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
1328  "qla_target(%d): task abort (s_id=%x:%x:%x, "
1329  "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
1330  abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
1331  le32_to_cpu(abts->fcp_hdr_le.parameter));
1332 
1333  s_id[0] = abts->fcp_hdr_le.s_id[2];
1334  s_id[1] = abts->fcp_hdr_le.s_id[1];
1335  s_id[2] = abts->fcp_hdr_le.s_id[0];
1336 
1337  sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
1338  if (!sess) {
1339  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
1340  "qla_target(%d): task abort for non-existant session\n",
1341  vha->vp_idx);
1342  rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
1343  QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
1344  if (rc != 0) {
1345  qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
1346  false);
1347  }
1348  return;
1349  }
1350 
1351  rc = __qlt_24xx_handle_abts(vha, abts, sess);
1352  if (rc != 0) {
1353  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
1354  "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
1355  vha->vp_idx, rc);
1356  qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1357  return;
1358  }
1359 }
1360 
1361 /*
1362  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1363  */
1364 static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1365  struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
1366 {
1367  struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
1368  struct ctio7_to_24xx *ctio;
1369 
1370  ql_dbg(ql_dbg_tgt, ha, 0xe008,
1371  "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
1372  ha, atio, resp_code);
1373 
1374  /* Send marker if required */
1375  if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
1376  return;
1377 
1378  ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
1379  if (ctio == NULL) {
1380  ql_dbg(ql_dbg_tgt, ha, 0xe04c,
1381  "qla_target(%d): %s failed: unable to allocate "
1382  "request packet\n", ha->vp_idx, __func__);
1383  return;
1384  }
1385 
1386  ctio->entry_type = CTIO_TYPE7;
1387  ctio->entry_count = 1;
1388  ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1389  ctio->nport_handle = mcmd->sess->loop_id;
1390  ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
1391  ctio->vp_index = ha->vp_idx;
1392  ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
1393  ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
1394  ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1395  ctio->exchange_addr = atio->u.isp24.exchange_addr;
1396  ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
1397  __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1398  CTIO7_FLAGS_SEND_STATUS);
1399  ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
1400  ctio->u.status1.scsi_status =
1401  __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
1402  ctio->u.status1.response_len = __constant_cpu_to_le16(8);
1403  ctio->u.status1.sense_data[0] = resp_code;
1404 
1405  qla2x00_start_iocbs(ha, ha->req);
1406 }
1407 
1408 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
1409 {
1410  mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
1411 }
1412 EXPORT_SYMBOL(qlt_free_mcmd);
1413 
1414 /* callback from target fabric module code */
1415 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
1416 {
1417  struct scsi_qla_host *vha = mcmd->sess->vha;
1418  struct qla_hw_data *ha = vha->hw;
1419  unsigned long flags;
1420 
1421  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
1422  "TM response mcmd (%p) status %#x state %#x",
1423  mcmd, mcmd->fc_tm_rsp, mcmd->flags);
1424 
1425  spin_lock_irqsave(&ha->hardware_lock, flags);
1426  if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
1427  qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
1428  0, 0, 0, 0, 0, 0);
1429  else {
1430  if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
1431  qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
1432  mcmd->fc_tm_rsp, false);
1433  else
1434  qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
1435  mcmd->fc_tm_rsp);
1436  }
1437  /*
1438  * Make the callback for ->free_mcmd() to queue_work() and invoke
1439  * target_put_sess_cmd() to drop cmd_kref to 1. The final
1440  * target_put_sess_cmd() call will be made from TFO->check_stop_free()
1441  * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
1442  * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
1443  * qlt_xmit_tm_rsp() returns here..
1444  */
1445  ha->tgt.tgt_ops->free_mcmd(mcmd);
1446  spin_unlock_irqrestore(&ha->hardware_lock, flags);
1447 }
1448 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
1449 
1450 /* No locks */
1451 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
1452 {
1453  struct qla_tgt_cmd *cmd = prm->cmd;
1454 
1455  BUG_ON(cmd->sg_cnt == 0);
1456 
1457  prm->sg = (struct scatterlist *)cmd->sg;
1458  prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
1459  cmd->sg_cnt, cmd->dma_data_direction);
1460  if (unlikely(prm->seg_cnt == 0))
1461  goto out_err;
1462 
1463  prm->cmd->sg_mapped = 1;
1464 
1465  /*
1466  * If greater than four sg entries then we need to allocate
1467  * the continuation entries
1468  */
1469  if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
1470  prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
1471  prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
1472 
1473  ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
1474  prm->seg_cnt, prm->req_cnt);
1475  return 0;
1476 
1477 out_err:
1478  ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
1479  "qla_target(%d): PCI mapping failed: sg_cnt=%d",
1480  0, prm->cmd->sg_cnt);
1481  return -1;
1482 }
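/*
 * Editor's worked example (added): suppose datasegs_per_cmd = 3 and
 * datasegs_per_cont = 7 (illustrative values only). A command mapped to
 * seg_cnt = 10 S/G entries then needs DIV_ROUND_UP(10 - 3, 7) = 1 extra
 * continuation IOCB, so req_cnt grows from 1 to 2.
 */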
1483 
1484 static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
1485  struct qla_tgt_cmd *cmd)
1486 {
1487  struct qla_hw_data *ha = vha->hw;
1488 
1489  BUG_ON(!cmd->sg_mapped);
1490  pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
1491  cmd->sg_mapped = 0;
1492 }
1493 
1494 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
1495  uint32_t req_cnt)
1496 {
1497  struct qla_hw_data *ha = vha->hw;
1498  device_reg_t __iomem *reg = ha->iobase;
1499  uint32_t cnt;
1500 
1501  if (vha->req->cnt < (req_cnt + 2)) {
1502  cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
1503 
1504  ql_dbg(ql_dbg_tgt, vha, 0xe00a,
1505  "Request ring circled: cnt=%d, vha->->ring_index=%d, "
1506  "vha->req->cnt=%d, req_cnt=%d\n", cnt,
1507  vha->req->ring_index, vha->req->cnt, req_cnt);
1508  if (vha->req->ring_index < cnt)
1509  vha->req->cnt = cnt - vha->req->ring_index;
1510  else
1511  vha->req->cnt = vha->req->length -
1512  (vha->req->ring_index - cnt);
1513  }
1514 
1515  if (unlikely(vha->req->cnt < (req_cnt + 2))) {
1516  ql_dbg(ql_dbg_tgt, vha, 0xe00b,
1517  "qla_target(%d): There is no room in the "
1518  "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
1519  "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
1520  vha->req->cnt, req_cnt);
1521  return -EAGAIN;
1522  }
1523  vha->req->cnt -= req_cnt;
1524 
1525  return 0;
1526 }
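/*
 * Editor's worked example (added): the recalculation above derives free
 * space from the firmware's consumer index. E.g. with req->length = 2048,
 * ring_index = 100 and req_q_out = 40, the producer has wrapped past the
 * consumer, so vha->req->cnt = 2048 - (100 - 40) = 1988 entries remain;
 * the "+ 2" keeps a safety gap so the ring never appears completely full.
 */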
1527 
1528 /*
1529  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1530  */
1531 static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
1532 {
1533  /* Adjust ring index. */
1534  vha->req->ring_index++;
1535  if (vha->req->ring_index == vha->req->length) {
1536  vha->req->ring_index = 0;
1537  vha->req->ring_ptr = vha->req->ring;
1538  } else {
1539  vha->req->ring_ptr++;
1540  }
1541  return (cont_entry_t *)vha->req->ring_ptr;
1542 }
1543 
1544 /* ha->hardware_lock supposed to be held on entry */
1545 static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
1546 {
1547  struct qla_hw_data *ha = vha->hw;
1548  uint32_t h;
1549 
1550  h = ha->tgt.current_handle;
1551  /* always increment cmd handle */
1552  do {
1553  ++h;
1554  if (h > MAX_OUTSTANDING_COMMANDS)
1555  h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
1556  if (h == ha->tgt.current_handle) {
1557  ql_dbg(ql_dbg_tgt, vha, 0xe04e,
1558  "qla_target(%d): Ran out of "
1559  "empty cmd slots in ha %p\n", vha->vp_idx, ha);
1560  h = QLA_TGT_NULL_HANDLE;
1561  break;
1562  }
1563  } while ((h == QLA_TGT_NULL_HANDLE) ||
1564  (h == QLA_TGT_SKIP_HANDLE) ||
1565  (ha->tgt.cmds[h-1] != NULL));
1566 
1567  if (h != QLA_TGT_NULL_HANDLE)
1568  ha->tgt.current_handle = h;
1569 
1570  return h;
1571 }
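/*
 * Editor's note (added): handles are allocated round-robin in
 * 1..MAX_OUTSTANDING_COMMANDS, skipping 0 (QLA_TGT_NULL_HANDLE) and
 * QLA_TGT_SKIP_HANDLE, and a slot is usable only while ha->tgt.cmds[h-1]
 * is NULL; if the scan wraps all the way back to the starting point the
 * table is full and QLA_TGT_NULL_HANDLE is returned.
 */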
1572 
1573 /* ha->hardware_lock supposed to be held on entry */
1574 static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
1575  struct scsi_qla_host *vha)
1576 {
1577  uint32_t h;
1578  struct ctio7_to_24xx *pkt;
1579  struct qla_hw_data *ha = vha->hw;
1580  struct atio_from_isp *atio = &prm->cmd->atio;
1581 
1582  pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
1583  prm->pkt = pkt;
1584  memset(pkt, 0, sizeof(*pkt));
1585 
1586  pkt->entry_type = CTIO_TYPE7;
1587  pkt->entry_count = (uint8_t)prm->req_cnt;
1588  pkt->vp_index = vha->vp_idx;
1589 
1590  h = qlt_make_handle(vha);
1591  if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
1592  /*
1593  * CTIO type 7 from the firmware doesn't provide a way to
1594  * know the initiator's LOOP ID, hence we can't find
1595  * the session and, so, the command.
1596  */
1597  return -EAGAIN;
1598  } else
1599  ha->tgt.cmds[h-1] = prm->cmd;
1600 
1601  pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
1602  pkt->nport_handle = prm->cmd->loop_id;
1603  pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
1604  pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
1605  pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
1606  pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1607  pkt->exchange_addr = atio->u.isp24.exchange_addr;
1608  pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
1609  pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
1610  pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
1611 
1612  ql_dbg(ql_dbg_tgt, vha, 0xe00c,
1613  "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
1614  vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
1615  le16_to_cpu(pkt->u.status0.ox_id));
1616  return 0;
1617 }
1618 
1619 /*
1620  * ha->hardware_lock supposed to be held on entry. We have already made sure
1621  * that there is a sufficient number of request entries to avoid dropping it.
1622  */
1623 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
1624  struct scsi_qla_host *vha)
1625 {
1626  int cnt;
1627  uint32_t *dword_ptr;
1628  int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1629 
1630  /* Build continuation packets */
1631  while (prm->seg_cnt > 0) {
1632  cont_a64_entry_t *cont_pkt64 =
1633  (cont_a64_entry_t *)qlt_get_req_pkt(vha);
1634 
1635  /*
1636  * Make sure that none of cont_pkt64's
1637  * 64-bit-specific fields are used for 32-bit
1638  * addressing. Cast to (cont_entry_t *) for
1639  * that.
1640  */
1641 
1642  memset(cont_pkt64, 0, sizeof(*cont_pkt64));
1643 
1644  cont_pkt64->entry_count = 1;
1645  cont_pkt64->sys_define = 0;
1646 
1647  if (enable_64bit_addressing) {
1648  cont_pkt64->entry_type = CONTINUE_A64_TYPE;
1649  dword_ptr =
1650  (uint32_t *)&cont_pkt64->dseg_0_address;
1651  } else {
1652  cont_pkt64->entry_type = CONTINUE_TYPE;
1653  dword_ptr =
1654  (uint32_t *)&((cont_entry_t *)
1655  cont_pkt64)->dseg_0_address;
1656  }
1657 
1658  /* Load continuation entry data segments */
1659  for (cnt = 0;
1660  cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
1661  cnt++, prm->seg_cnt--) {
1662  *dword_ptr++ =
1663  cpu_to_le32(pci_dma_lo32
1664  (sg_dma_address(prm->sg)));
1665  if (enable_64bit_addressing) {
1666  *dword_ptr++ =
1667  cpu_to_le32(pci_dma_hi32
1668  (sg_dma_address
1669  (prm->sg)));
1670  }
1671  *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1672 
1673  ql_dbg(ql_dbg_tgt, vha, 0xe00d,
1674  "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
1675  (long long unsigned int)
1676  pci_dma_hi32(sg_dma_address(prm->sg)),
1677  (long long unsigned int)
1678  pci_dma_lo32(sg_dma_address(prm->sg)),
1679  (int)sg_dma_len(prm->sg));
1680 
1681  prm->sg = sg_next(prm->sg);
1682  }
1683  }
1684 }
1685 
1686 /*
1687  * ha->hardware_lock supposed to be held on entry. We have already made sure
1688  * that there is a sufficient number of request entries to avoid dropping it.
1689  */
1690 static void qlt_load_data_segments(struct qla_tgt_prm *prm,
1691  struct scsi_qla_host *vha)
1692 {
1693  int cnt;
1694  uint32_t *dword_ptr;
1695  int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1696  struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
1697 
1698  ql_dbg(ql_dbg_tgt, vha, 0xe00e,
1699  "iocb->scsi_status=%x, iocb->flags=%x\n",
1700  le16_to_cpu(pkt24->u.status0.scsi_status),
1701  le16_to_cpu(pkt24->u.status0.flags));
1702 
1703  pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
1704 
1705  /* Setup packet address segment pointer */
1706  dword_ptr = pkt24->u.status0.dseg_0_address;
1707 
1708  /* Set total data segment count */
1709  if (prm->seg_cnt)
1710  pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
1711 
1712  if (prm->seg_cnt == 0) {
1713  /* No data transfer */
1714  *dword_ptr++ = 0;
1715  *dword_ptr = 0;
1716  return;
1717  }
1718 
1719  /* If scatter gather */
1720  ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
1721 
1722  /* Load command entry data segments */
1723  for (cnt = 0;
1724  (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
1725  cnt++, prm->seg_cnt--) {
1726  *dword_ptr++ =
1727  cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
1728  if (enable_64bit_addressing) {
1729  *dword_ptr++ =
1730  cpu_to_le32(pci_dma_hi32(
1731  sg_dma_address(prm->sg)));
1732  }
1733  *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1734 
1735  ql_dbg(ql_dbg_tgt, vha, 0xe010,
1736  "S/G Segment phys_addr=%llx:%llx, len=%d\n",
1737  (long long unsigned int)pci_dma_hi32(sg_dma_address(
1738  prm->sg)),
1739  (long long unsigned int)pci_dma_lo32(sg_dma_address(
1740  prm->sg)),
1741  (int)sg_dma_len(prm->sg));
1742 
1743  prm->sg = sg_next(prm->sg);
1744  }
1745 
1746  qlt_load_cont_data_segments(prm, vha);
1747 }
1748 
1749 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
1750 {
1751  return cmd->bufflen > 0;
1752 }
1753 
1754 /*
1755  * Called without ha->hardware_lock held
1756  */
1757 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
1758  struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
1759  uint32_t *full_req_cnt)
1760 {
1761  struct qla_tgt *tgt = cmd->tgt;
1762  struct scsi_qla_host *vha = tgt->vha;
1763  struct qla_hw_data *ha = vha->hw;
1764  struct se_cmd *se_cmd = &cmd->se_cmd;
1765 
1766  if (unlikely(cmd->aborted)) {
1767  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
1768  "qla_target(%d): terminating exchange "
1769  "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
1770  se_cmd, cmd->tag);
1771 
1772  cmd->state = QLA_TGT_STATE_ABORTED;
1773 
1774  qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
1775 
1776  /* !! At this point cmd could be already freed !! */
1777  return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
1778  }
1779 
1780  ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
1781  vha->vp_idx, cmd->tag);
1782 
1783  prm->cmd = cmd;
1784  prm->tgt = tgt;
1785  prm->rq_result = scsi_status;
1786  prm->sense_buffer = &cmd->sense_buffer[0];
1787  prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
1788  prm->sg = NULL;
1789  prm->seg_cnt = -1;
1790  prm->req_cnt = 1;
1791  prm->add_status_pkt = 0;
1792 
1793  ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
1794  prm->rq_result, xmit_type);
1795 
1796  /* Send marker if required */
1797  if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
1798  return -EFAULT;
1799 
1800  ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
1801 
1802  if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
1803  if (qlt_pci_map_calc_cnt(prm) != 0)
1804  return -EAGAIN;
1805  }
1806 
1807  *full_req_cnt = prm->req_cnt;
1808 
1809  if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1810  prm->residual = se_cmd->residual_count;
1811  ql_dbg(ql_dbg_tgt, vha, 0xe014,
1812  "Residual underflow: %d (tag %d, "
1813  "op %x, bufflen %d, rq_result %x)\n", prm->residual,
1814  cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
1815  cmd->bufflen, prm->rq_result);
1816  prm->rq_result |= SS_RESIDUAL_UNDER;
1817  } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1818  prm->residual = se_cmd->residual_count;
1819  ql_dbg(ql_dbg_tgt, vha, 0xe015,
1820  "Residual overflow: %d (tag %d, "
1821  "op %x, bufflen %d, rq_result %x)\n", prm->residual,
1822  cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
1823  cmd->bufflen, prm->rq_result);
1824  prm->rq_result |= SS_RESIDUAL_OVER;
1825  }
1826 
1827  if (xmit_type & QLA_TGT_XMIT_STATUS) {
1828  /*
1829  * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
1830  * ignored in *xmit_response() below
1831  */
1832  if (qlt_has_data(cmd)) {
1833  if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
1834  (IS_FWI2_CAPABLE(ha) &&
1835  (prm->rq_result != 0))) {
1836  prm->add_status_pkt = 1;
1837  (*full_req_cnt)++;
1838  }
1839  }
1840  }
1841 
1842  ql_dbg(ql_dbg_tgt, vha, 0xe016,
1843  "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
1844  prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
1845 
1846  return 0;
1847 }
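 
 /*
  * Worked example for the residual handling above (assumed values, not from
  * a trace): for a READ of 4096 bytes that the backend completes with only
  * 512 bytes, target-core sets SCF_UNDERFLOW_BIT and residual_count = 3584;
  * qlt_pre_xmit_response() then ORs SS_RESIDUAL_UNDER into rq_result so the
  * initiator can detect the short transfer. SCF_OVERFLOW_BIT together with
  * SS_RESIDUAL_OVER covers the symmetric case.
  */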
1848 
1849 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
1850  struct qla_tgt_cmd *cmd, int sending_sense)
1851 {
1852  if (ha->tgt.enable_class_2)
1853  return 0;
1854 
1855  if (sending_sense)
1856  return cmd->conf_compl_supported;
1857  else
1858  return ha->tgt.enable_explicit_conf &&
1859  cmd->conf_compl_supported;
1860 }
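 
 /*
  * Summary of the explicit-confirmation decision above: FC Class 2 carries
  * its own acknowledgement, so explicit confirmation is never requested when
  * enable_class_2 is set. Otherwise, when sending sense the only requirement
  * is that the initiator supports confirmed completion; for plain status the
  * enable_explicit_conf knob must be set as well:
  *
  *	sending_sense	result
  *	     1		conf_compl_supported
  *	     0		enable_explicit_conf && conf_compl_supported
  */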
1861 
1862 #ifdef CONFIG_QLA_TGT_DEBUG_SRR
1863 /*
 1864  * Originally taken from the XFS code
1865  */
1866 static unsigned long qlt_srr_random(void)
1867 {
1868  static int Inited;
1869  static unsigned long RandomValue;
1870  static DEFINE_SPINLOCK(lock);
1871  /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
1872  register long rv;
1873  register long lo;
1874  register long hi;
1875  unsigned long flags;
1876 
1877  spin_lock_irqsave(&lock, flags);
1878  if (!Inited) {
1879  RandomValue = jiffies;
1880  Inited = 1;
1881  }
1882  rv = RandomValue;
1883  hi = rv / 127773;
1884  lo = rv % 127773;
1885  rv = 16807 * lo - 2836 * hi;
1886  if (rv <= 0)
1887  rv += 2147483647;
1888  RandomValue = rv;
1889  spin_unlock_irqrestore(&lock, flags);
1890  return rv;
1891 }
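 
 /*
  * A minimal standalone sketch of the generator above: the Park-Miller
  * "minimal standard" LCG, rv = (16807 * rv) mod (2^31 - 1), computed with
  * Schrage's decomposition m = a*q + r (q = 127773, r = 2836) so that the
  * intermediate products fit in a signed 32-bit long. Illustration only,
  * not part of the driver:
  */
 #if 0
 static long park_miller_next(long rv)
 {
 	long hi = rv / 127773;		/* rv = hi * q + lo */
 	long lo = rv % 127773;
 
 	/* a*lo - r*hi is congruent to a*rv (mod m) but may go negative */
 	rv = 16807 * lo - 2836 * hi;
 	if (rv <= 0)
 		rv += 2147483647;
 	return rv;
 }
 #endif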
1892 
1893 static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
1894 {
 1895 #if 0 /* This is not a real lost status packet, so it won't lead to an SRR */
1896  if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
1897  == 50) {
1898  *xmit_type &= ~QLA_TGT_XMIT_STATUS;
1899  ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
1900  "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
1901  }
1902 #endif
1903  /*
1904  * It's currently not possible to simulate SRRs for FCP_WRITE without
1905  * a physical link layer failure, so don't even try here..
1906  */
1907  if (cmd->dma_data_direction != DMA_FROM_DEVICE)
1908  return;
1909 
1910  if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
1911  ((qlt_srr_random() % 100) == 20)) {
1912  int i, leave = 0;
1913  unsigned int tot_len = 0;
1914 
1915  while (leave == 0)
1916  leave = qlt_srr_random() % cmd->sg_cnt;
1917 
1918  for (i = 0; i < leave; i++)
1919  tot_len += cmd->sg[i].length;
1920 
1921  ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
1922  "Cutting cmd %p (tag %d) buffer"
1923  " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
1924  " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
1925  cmd->bufflen, cmd->sg_cnt);
1926 
1927  cmd->bufflen = tot_len;
1928  cmd->sg_cnt = leave;
1929  }
1930 
1931  if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
1932  unsigned int offset = qlt_srr_random() % cmd->bufflen;
1933 
1934  ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
1935  "Cutting cmd %p (tag %d) buffer head "
1936  "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
1937  cmd->bufflen);
1938  if (offset == 0)
1939  *xmit_type &= ~QLA_TGT_XMIT_DATA;
1940  else if (qlt_set_data_offset(cmd, offset)) {
1941  ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
1942  "qlt_set_data_offset() failed (tag %d)", cmd->tag);
1943  }
1944  }
1945 }
1946 #else
1947 static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
1948 {}
1949 #endif
1950 
1951 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
1952  struct qla_tgt_prm *prm)
1953 {
 1954  prm->sense_buffer_len = min((uint32_t)prm->sense_buffer_len,
 1955  (uint32_t)sizeof(ctio->u.status1.sense_data));
 1956  ctio->u.status0.flags |=
 1957  __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
 1958  if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
 1959  ctio->u.status0.flags |= __constant_cpu_to_le16(
 1960  CTIO7_FLAGS_EXPLICIT_CONFORM |
 1961  CTIO7_FLAGS_CONFORM_REQ);
 1962  }
1963  ctio->u.status0.residual = cpu_to_le32(prm->residual);
1964  ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
1965  if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
1966  int i;
1967 
1968  if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
1969  if (prm->cmd->se_cmd.scsi_status != 0) {
1970  ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
1971  "Skipping EXPLICIT_CONFORM and "
1972  "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
1973  "non GOOD status\n");
 1974  goto skip_explicit_conf;
1975  }
 1976  ctio->u.status1.flags |= __constant_cpu_to_le16(
 1977  CTIO7_FLAGS_EXPLICIT_CONFORM |
 1978  CTIO7_FLAGS_CONFORM_REQ);
 1979  }
 1980 skip_explicit_conf:
 1981  ctio->u.status1.flags &=
 1982  ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
 1983  ctio->u.status1.flags |=
 1984  __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
 1985  ctio->u.status1.scsi_status |=
 1986  __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
 1987  ctio->u.status1.sense_length =
 1988  cpu_to_le16(prm->sense_buffer_len);
 1989  for (i = 0; i < prm->sense_buffer_len/4; i++)
1990  ((uint32_t *)ctio->u.status1.sense_data)[i] =
1991  cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
1992 #if 0
1993  if (unlikely((prm->sense_buffer_len % 4) != 0)) {
1994  static int q;
1995  if (q < 10) {
1996  ql_dbg(ql_dbg_tgt, vha, 0xe04f,
1997  "qla_target(%d): %d bytes of sense "
1998  "lost", prm->tgt->ha->vp_idx,
1999  prm->sense_buffer_len % 4);
2000  q++;
2001  }
2002  }
2003 #endif
2004  } else {
 2005  ctio->u.status1.flags &=
 2006  ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
 2007  ctio->u.status1.flags |=
 2008  __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
 2009  ctio->u.status1.sense_length = 0;
2010  memset(ctio->u.status1.sense_data, 0,
2011  sizeof(ctio->u.status1.sense_data));
2012  }
2013 
 2014  /* Can sense data longer than 24 bytes occur here? */
2015 }
2016 
2017 /*
 2018  * Callback to set up a response with xmit_type QLA_TGT_XMIT_DATA and/or
 2019  * QLA_TGT_XMIT_STATUS for >= 24xx silicon
2020  */
2021 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2022  uint8_t scsi_status)
2023 {
2024  struct scsi_qla_host *vha = cmd->vha;
2025  struct qla_hw_data *ha = vha->hw;
2026  struct ctio7_to_24xx *pkt;
2027  struct qla_tgt_prm prm;
2028  uint32_t full_req_cnt = 0;
2029  unsigned long flags = 0;
2030  int res;
2031 
2032  memset(&prm, 0, sizeof(prm));
2033  qlt_check_srr_debug(cmd, &xmit_type);
2034 
2035  ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
2036  "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
2037  "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
2038  1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
2039 
2040  res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2041  &full_req_cnt);
2042  if (unlikely(res != 0)) {
 2043  if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
 2044  return 0;
2045 
2046  return res;
2047  }
2048 
2049  spin_lock_irqsave(&ha->hardware_lock, flags);
2050 
 2051  /* Does the F/W have enough IOCBs for this request? */
2052  res = qlt_check_reserve_free_req(vha, full_req_cnt);
2053  if (unlikely(res))
2054  goto out_unmap_unlock;
2055 
2056  res = qlt_24xx_build_ctio_pkt(&prm, vha);
2057  if (unlikely(res != 0))
2058  goto out_unmap_unlock;
2059 
2060 
2061  pkt = (struct ctio7_to_24xx *)prm.pkt;
2062 
2063  if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
2064  pkt->u.status0.flags |=
 2065  __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
 2066  CTIO7_FLAGS_STATUS_MODE_0);
 2067 
2068  qlt_load_data_segments(&prm, vha);
2069 
2070  if (prm.add_status_pkt == 0) {
2071  if (xmit_type & QLA_TGT_XMIT_STATUS) {
2072  pkt->u.status0.scsi_status =
2073  cpu_to_le16(prm.rq_result);
2074  pkt->u.status0.residual =
2075  cpu_to_le32(prm.residual);
 2076  pkt->u.status0.flags |= __constant_cpu_to_le16(
 2077  CTIO7_FLAGS_SEND_STATUS);
 2078  if (qlt_need_explicit_conf(ha, cmd, 0)) {
 2079  pkt->u.status0.flags |=
 2080  __constant_cpu_to_le16(
 2081  CTIO7_FLAGS_EXPLICIT_CONFORM |
 2082  CTIO7_FLAGS_CONFORM_REQ);
 2083  }
2084  }
2085 
2086  } else {
2087  /*
 2088  * We have already made sure that there is a sufficient
 2089  * number of request entries so we do not drop the HW lock
 2090  * in req_pkt().
2091  */
2092  struct ctio7_to_24xx *ctio =
2093  (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
2094 
2095  ql_dbg(ql_dbg_tgt, vha, 0xe019,
2096  "Building additional status packet\n");
2097 
2098  memcpy(ctio, pkt, sizeof(*ctio));
2099  ctio->entry_count = 1;
2100  ctio->dseg_count = 0;
 2101  ctio->u.status1.flags &= ~__constant_cpu_to_le16(
 2102  CTIO7_FLAGS_DATA_IN);
 2103 
 2104  /* Real finish is ctio_m1's finish */
 2105  pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
 2106  pkt->u.status0.flags |= __constant_cpu_to_le16(
 2107  CTIO7_FLAGS_DONT_RET_CTIO);
2108  qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
2109  &prm);
2110  pr_debug("Status CTIO7: %p\n", ctio);
2111  }
2112  } else
2113  qlt_24xx_init_ctio_to_isp(pkt, &prm);
2114 
2115 
2116  cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
2117 
2118  ql_dbg(ql_dbg_tgt, vha, 0xe01a,
2119  "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
2120  pkt, scsi_status);
2121 
2122  qla2x00_start_iocbs(vha, vha->req);
2123  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2124 
2125  return 0;
2126 
2127 out_unmap_unlock:
2128  if (cmd->sg_mapped)
2129  qlt_unmap_sg(vha, cmd);
2130  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2131 
2132  return res;
2133 }
 2134 EXPORT_SYMBOL(qlt_xmit_response);
 2135 
 2136 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 2137 {
2138  struct ctio7_to_24xx *pkt;
2139  struct scsi_qla_host *vha = cmd->vha;
2140  struct qla_hw_data *ha = vha->hw;
2141  struct qla_tgt *tgt = cmd->tgt;
2142  struct qla_tgt_prm prm;
2143  unsigned long flags;
2144  int res = 0;
2145 
2146  memset(&prm, 0, sizeof(prm));
2147  prm.cmd = cmd;
2148  prm.tgt = tgt;
2149  prm.sg = NULL;
2150  prm.req_cnt = 1;
2151 
2152  /* Send marker if required */
2153  if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2154  return -EIO;
2155 
2156  ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
2157  (int)vha->vp_idx);
2158 
2159  /* Calculate number of entries and segments required */
2160  if (qlt_pci_map_calc_cnt(&prm) != 0)
2161  return -EAGAIN;
2162 
2163  spin_lock_irqsave(&ha->hardware_lock, flags);
2164 
 2165  /* Does the F/W have enough IOCBs for this request? */
2166  res = qlt_check_reserve_free_req(vha, prm.req_cnt);
2167  if (res != 0)
2168  goto out_unlock_free_unmap;
2169 
2170  res = qlt_24xx_build_ctio_pkt(&prm, vha);
2171  if (unlikely(res != 0))
2172  goto out_unlock_free_unmap;
2173  pkt = (struct ctio7_to_24xx *)prm.pkt;
 2174  pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
 2175  CTIO7_FLAGS_STATUS_MODE_0);
 2176  qlt_load_data_segments(&prm, vha);
 2177 
 2178  cmd->state = QLA_TGT_STATE_NEED_DATA;
 2179 
2180  qla2x00_start_iocbs(vha, vha->req);
2181  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2182 
2183  return res;
2184 
2185 out_unlock_free_unmap:
2186  if (cmd->sg_mapped)
2187  qlt_unmap_sg(vha, cmd);
2188  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2189 
2190  return res;
2191 }
 2192 EXPORT_SYMBOL(qlt_rdy_to_xfer);
 2193 
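 /*
  * Sketch of how a fabric module is assumed to drive the WRITE path through
  * qlt_rdy_to_xfer() (hypothetical callback, for illustration only): the
  * CTIO posted above requests the data-out phase, and completion arrives
  * later through qlt_ctio_completion() -> handle_data().
  */
 #if 0
 static int example_write_pending(struct se_cmd *se_cmd)
 {
 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
 			struct qla_tgt_cmd, se_cmd);
 
 	/* Posts the data-out CTIO; the data itself arrives asynchronously. */
 	return qlt_rdy_to_xfer(cmd);
 }
 #endif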
 2194 /* If hardware_lock held on entry, might drop it, then reacquire */
2195 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2196 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2197  struct qla_tgt_cmd *cmd,
2198  struct atio_from_isp *atio)
2199 {
2200  struct ctio7_to_24xx *ctio24;
2201  struct qla_hw_data *ha = vha->hw;
2202  request_t *pkt;
2203  int ret = 0;
2204 
2205  ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
2206 
2207  pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
2208  if (pkt == NULL) {
2209  ql_dbg(ql_dbg_tgt, vha, 0xe050,
2210  "qla_target(%d): %s failed: unable to allocate "
2211  "request packet\n", vha->vp_idx, __func__);
2212  return -ENOMEM;
2213  }
2214 
2215  if (cmd != NULL) {
2216  if (cmd->state < QLA_TGT_STATE_PROCESSED) {
2217  ql_dbg(ql_dbg_tgt, vha, 0xe051,
2218  "qla_target(%d): Terminating cmd %p with "
2219  "incorrect state %d\n", vha->vp_idx, cmd,
2220  cmd->state);
2221  } else
2222  ret = 1;
2223  }
2224 
2225  pkt->entry_count = 1;
 2226  pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
 2227 
2228  ctio24 = (struct ctio7_to_24xx *)pkt;
2229  ctio24->entry_type = CTIO_TYPE7;
2230  ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
 2231  ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
 2232  ctio24->vp_index = vha->vp_idx;
2233  ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2234  ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2235  ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2236  ctio24->exchange_addr = atio->u.isp24.exchange_addr;
2237  ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
 2238  __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
 2239  CTIO7_FLAGS_TERMINATE);
 2240  ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
2241 
2242  /* Most likely, it isn't needed */
2243  ctio24->u.status1.residual = get_unaligned((uint32_t *)
2244  &atio->u.isp24.fcp_cmnd.add_cdb[
2245  atio->u.isp24.fcp_cmnd.add_cdb_len]);
2246  if (ctio24->u.status1.residual != 0)
2247  ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
2248 
2249  qla2x00_start_iocbs(vha, vha->req);
2250  return ret;
2251 }
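 
 /*
  * Note on the return value above: 1 means the exchange was terminated for
  * a command that had already reached a final state, in which case the
  * caller (qlt_send_term_exchange() below) is responsible for freeing it.
  */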
2252 
2253 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2254  struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2255 {
2256  unsigned long flags;
2257  int rc;
2258 
2259  if (qlt_issue_marker(vha, ha_locked) < 0)
2260  return;
2261 
2262  if (ha_locked) {
2263  rc = __qlt_send_term_exchange(vha, cmd, atio);
2264  goto done;
2265  }
2266  spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2267  rc = __qlt_send_term_exchange(vha, cmd, atio);
2268  spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2269 done:
2270  if (rc == 1) {
2271  if (!ha_locked && !in_interrupt())
2272  msleep(250); /* just in case */
2273 
2274  vha->hw->tgt.tgt_ops->free_cmd(cmd);
2275  }
2276 }
2277 
2278 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2279 {
2280  BUG_ON(cmd->sg_mapped);
2281 
2282  if (unlikely(cmd->free_sg))
2283  kfree(cmd->sg);
2284  kmem_cache_free(qla_tgt_cmd_cachep, cmd);
2285 }
 2286 EXPORT_SYMBOL(qlt_free_cmd);
 2287 
2288 /* ha->hardware_lock supposed to be held on entry */
2289 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
2290  struct qla_tgt_cmd *cmd, void *ctio)
2291 {
2292  struct qla_tgt_srr_ctio *sc;
2293  struct qla_hw_data *ha = vha->hw;
2294  struct qla_tgt *tgt = ha->tgt.qla_tgt;
2295  struct qla_tgt_srr_imm *imm;
2296 
2297  tgt->ctio_srr_id++;
2298 
2299  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
2300  "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
2301 
2302  if (!ctio) {
2303  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
2304  "qla_target(%d): SRR CTIO, but ctio is NULL\n",
2305  vha->vp_idx);
2306  return -EINVAL;
2307  }
2308 
2309  sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
2310  if (sc != NULL) {
2311  sc->cmd = cmd;
2312  /* IRQ is already OFF */
2313  spin_lock(&tgt->srr_lock);
2314  sc->srr_id = tgt->ctio_srr_id;
 2315  list_add_tail(&sc->srr_list_entry,
 2316  &tgt->srr_ctio_list);
2317  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
2318  "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
2319  if (tgt->imm_srr_id == tgt->ctio_srr_id) {
2320  int found = 0;
2321  list_for_each_entry(imm, &tgt->srr_imm_list,
2322  srr_list_entry) {
2323  if (imm->srr_id == sc->srr_id) {
2324  found = 1;
2325  break;
2326  }
2327  }
2328  if (found) {
2329  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
2330  "Scheduling srr work\n");
2331  schedule_work(&tgt->srr_work);
2332  } else {
2333  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
2334  "qla_target(%d): imm_srr_id "
2335  "== ctio_srr_id (%d), but there is no "
2336  "corresponding SRR IMM, deleting CTIO "
2337  "SRR %p\n", vha->vp_idx,
2338  tgt->ctio_srr_id, sc);
2339  list_del(&sc->srr_list_entry);
2340  spin_unlock(&tgt->srr_lock);
2341 
2342  kfree(sc);
2343  return -EINVAL;
2344  }
2345  }
2346  spin_unlock(&tgt->srr_lock);
2347  } else {
2348  struct qla_tgt_srr_imm *ti;
2349 
2350  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
2351  "qla_target(%d): Unable to allocate SRR CTIO entry\n",
2352  vha->vp_idx);
2353  spin_lock(&tgt->srr_lock);
2354  list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
2355  srr_list_entry) {
2356  if (imm->srr_id == tgt->ctio_srr_id) {
2357  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
2358  "IMM SRR %p deleted (id %d)\n",
2359  imm, imm->srr_id);
2360  list_del(&imm->srr_list_entry);
2361  qlt_reject_free_srr_imm(vha, imm, 1);
2362  }
2363  }
2364  spin_unlock(&tgt->srr_lock);
2365 
2366  return -ENOMEM;
2367  }
2368 
2369  return 0;
2370 }
2371 
2372 /*
 2373  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2374  */
2375 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
2376  struct qla_tgt_cmd *cmd, uint32_t status)
2377 {
2378  int term = 0;
2379 
2380  if (ctio != NULL) {
2381  struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
2382  term = !(c->flags &
 2383  __constant_cpu_to_le16(OF_TERM_EXCH));
 2384  } else
2385  term = 1;
2386 
2387  if (term)
2388  qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2389 
2390  return term;
2391 }
2392 
2393 /* ha->hardware_lock supposed to be held on entry */
2394 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
2395  uint32_t handle)
2396 {
2397  struct qla_hw_data *ha = vha->hw;
2398 
2399  handle--;
2400  if (ha->tgt.cmds[handle] != NULL) {
2401  struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
2402  ha->tgt.cmds[handle] = NULL;
2403  return cmd;
2404  } else
2405  return NULL;
2406 }
2407 
2408 /* ha->hardware_lock supposed to be held on entry */
2409 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
2410  uint32_t handle, void *ctio)
2411 {
2412  struct qla_tgt_cmd *cmd = NULL;
2413 
2414  /* Clear out internal marks */
2415  handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
 2416  CTIO_INTERMEDIATE_HANDLE_MARK);
 2417 
2418  if (handle != QLA_TGT_NULL_HANDLE) {
2419  if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
2420  ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
2421  "SKIP_HANDLE CTIO\n");
2422  return NULL;
2423  }
2424  /* handle-1 is actually used */
2425  if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
2426  ql_dbg(ql_dbg_tgt, vha, 0xe052,
2427  "qla_target(%d): Wrong handle %x received\n",
2428  vha->vp_idx, handle);
2429  return NULL;
2430  }
2431  cmd = qlt_get_cmd(vha, handle);
2432  if (unlikely(cmd == NULL)) {
2433  ql_dbg(ql_dbg_tgt, vha, 0xe053,
2434  "qla_target(%d): Suspicious: unable to "
2435  "find the command with handle %x\n", vha->vp_idx,
2436  handle);
2437  return NULL;
2438  }
2439  } else if (ctio != NULL) {
2440  /* We can't get loop ID from CTIO7 */
2441  ql_dbg(ql_dbg_tgt, vha, 0xe054,
2442  "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
2443  "support NULL handles\n", vha->vp_idx);
2444  return NULL;
2445  }
2446 
2447  return cmd;
2448 }
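 
 /*
  * Illustrative note on the handle encoding used above (see qlt_get_cmd()):
  * the handle stores the ha->tgt.cmds[] index biased by one, so that zero
  * can serve as QLA_TGT_NULL_HANDLE, plus optional mark bits:
  *
  *	handle = (idx + 1) | CTIO_COMPLETION_HANDLE_MARK;
  *	idx    = (handle & ~(CTIO_COMPLETION_HANDLE_MARK |
  *			     CTIO_INTERMEDIATE_HANDLE_MARK)) - 1;
  */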
2449 
2450 /*
 2451  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2452  */
2453 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
2454  uint32_t status, void *ctio)
2455 {
2456  struct qla_hw_data *ha = vha->hw;
2457  struct se_cmd *se_cmd;
2458  struct target_core_fabric_ops *tfo;
2459  struct qla_tgt_cmd *cmd;
2460 
2461  ql_dbg(ql_dbg_tgt, vha, 0xe01e,
2462  "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
2463  vha->vp_idx, ctio, status, handle);
2464 
2465  if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
2466  /* That could happen only in case of an error/reset/abort */
2467  if (status != CTIO_SUCCESS) {
2468  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
2469  "Intermediate CTIO received"
2470  " (status %x)\n", status);
2471  }
2472  return;
2473  }
2474 
2475  cmd = qlt_ctio_to_cmd(vha, handle, ctio);
2476  if (cmd == NULL)
2477  return;
2478 
2479  se_cmd = &cmd->se_cmd;
2480  tfo = se_cmd->se_tfo;
2481 
2482  if (cmd->sg_mapped)
2483  qlt_unmap_sg(vha, cmd);
2484 
2485  if (unlikely(status != CTIO_SUCCESS)) {
2486  switch (status & 0xFFFF) {
2487  case CTIO_LIP_RESET:
2488  case CTIO_TARGET_RESET:
2489  case CTIO_ABORTED:
2490  case CTIO_TIMEOUT:
2491  case CTIO_INVALID_RX_ID:
2492  /* They are OK */
2493  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
2494  "qla_target(%d): CTIO with "
2495  "status %#x received, state %x, se_cmd %p, "
2496  "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
2497  "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
2498  status, cmd->state, se_cmd);
2499  break;
2500 
2501  case CTIO_PORT_LOGGED_OUT:
2502  case CTIO_PORT_UNAVAILABLE:
2503  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
2504  "qla_target(%d): CTIO with PORT LOGGED "
2505  "OUT (29) or PORT UNAVAILABLE (28) status %x "
2506  "received (state %x, se_cmd %p)\n", vha->vp_idx,
2507  status, cmd->state, se_cmd);
2508  break;
2509 
2510  case CTIO_SRR_RECEIVED:
2511  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
2512  "qla_target(%d): CTIO with SRR_RECEIVED"
2513  " status %x received (state %x, se_cmd %p)\n",
2514  vha->vp_idx, status, cmd->state, se_cmd);
2515  if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
2516  break;
2517  else
2518  return;
2519 
2520  default:
2521  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
2522  "qla_target(%d): CTIO with error status "
2523  "0x%x received (state %x, se_cmd %p\n",
2524  vha->vp_idx, status, cmd->state, se_cmd);
2525  break;
2526  }
2527 
2528  if (cmd->state != QLA_TGT_STATE_NEED_DATA)
2529  if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
2530  return;
2531  }
2532 
2533  if (cmd->state == QLA_TGT_STATE_PROCESSED) {
2534  ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
2535  } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
2536  int rx_status = 0;
2537 
 2538  cmd->state = QLA_TGT_STATE_DATA_IN;
 2539 
2540  if (unlikely(status != CTIO_SUCCESS))
2541  rx_status = -EIO;
2542  else
2543  cmd->write_data_transferred = 1;
2544 
2545  ql_dbg(ql_dbg_tgt, vha, 0xe020,
2546  "Data received, context %x, rx_status %d\n",
2547  0x0, rx_status);
2548 
2549  ha->tgt.tgt_ops->handle_data(cmd);
2550  return;
2551  } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
2552  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
2553  "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
2554  } else {
2555  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
2556  "qla_target(%d): A command in state (%d) should "
2557  "not return a CTIO complete\n", vha->vp_idx, cmd->state);
2558  }
2559 
2560  if (unlikely(status != CTIO_SUCCESS)) {
2561  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
2562  dump_stack();
2563  }
2564 
2565  ha->tgt.tgt_ops->free_cmd(cmd);
2566 }
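 
 /*
  * Summary of the qla_tgt_cmd state handling above:
  *
  *	QLA_TGT_STATE_PROCESSED -> response sent, free the command
  *	QLA_TGT_STATE_NEED_DATA -> QLA_TGT_STATE_DATA_IN, hand the received
  *				   write data to the backend (handle_data())
  *	QLA_TGT_STATE_ABORTED   -> just free the command
  */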
2567 
2568 /* ha->hardware_lock supposed to be held on entry */
2569 /* called via callback from qla2xxx */
2570 void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
2571 {
2572  struct qla_hw_data *ha = vha->hw;
2573  struct qla_tgt *tgt = ha->tgt.qla_tgt;
2574 
2575  if (likely(tgt == NULL)) {
2576  ql_dbg(ql_dbg_tgt, vha, 0xe021,
2577  "CTIO, but target mode not enabled"
2578  " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
2579  return;
2580  }
2581 
2582  tgt->irq_cmd_count++;
2583  qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
2584  tgt->irq_cmd_count--;
2585 }
2586 
2587 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
2588  uint8_t task_codes)
2589 {
2590  int fcp_task_attr;
2591 
2592  switch (task_codes) {
2593  case ATIO_SIMPLE_QUEUE:
2594  fcp_task_attr = MSG_SIMPLE_TAG;
2595  break;
2596  case ATIO_HEAD_OF_QUEUE:
2597  fcp_task_attr = MSG_HEAD_TAG;
2598  break;
2599  case ATIO_ORDERED_QUEUE:
2600  fcp_task_attr = MSG_ORDERED_TAG;
2601  break;
2602  case ATIO_ACA_QUEUE:
2603  fcp_task_attr = MSG_ACA_TAG;
2604  break;
2605  case ATIO_UNTAGGED:
2606  fcp_task_attr = MSG_SIMPLE_TAG;
2607  break;
2608  default:
2609  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
2610  "qla_target: unknown task code %x, use ORDERED instead\n",
2611  task_codes);
2612  fcp_task_attr = MSG_ORDERED_TAG;
2613  break;
2614  }
2615 
2616  return fcp_task_attr;
2617 }
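 
 /*
  * An equivalent table-driven sketch of the mapping above (illustration
  * only; assumes the ATIO_* task codes stay small, dense integers):
  */
 #if 0
 static const int atio_to_tcm_attr[] = {
 	[ATIO_UNTAGGED]		= MSG_SIMPLE_TAG,
 	[ATIO_SIMPLE_QUEUE]	= MSG_SIMPLE_TAG,
 	[ATIO_ORDERED_QUEUE]	= MSG_ORDERED_TAG,
 	[ATIO_HEAD_OF_QUEUE]	= MSG_HEAD_TAG,
 	[ATIO_ACA_QUEUE]	= MSG_ACA_TAG,
 };
 #endif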
2618 
2619 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
2620  uint8_t *);
2621 /*
2622  * Process context for I/O path into tcm_qla2xxx code
2623  */
2624 static void qlt_do_work(struct work_struct *work)
2625 {
2626  struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
2627  scsi_qla_host_t *vha = cmd->vha;
2628  struct qla_hw_data *ha = vha->hw;
2629  struct qla_tgt *tgt = ha->tgt.qla_tgt;
2630  struct qla_tgt_sess *sess = NULL;
2631  struct atio_from_isp *atio = &cmd->atio;
2632  unsigned char *cdb;
2633  unsigned long flags;
 2634  uint32_t data_length;
 2635  int ret, fcp_task_attr, data_dir, bidi = 0;
2636 
2637  if (tgt->tgt_stop)
2638  goto out_term;
2639 
2640  spin_lock_irqsave(&ha->hardware_lock, flags);
2641  sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
2642  atio->u.isp24.fcp_hdr.s_id);
2643  /* Do kref_get() before dropping qla_hw_data->hardware_lock. */
2644  if (sess)
2645  kref_get(&sess->se_sess->sess_kref);
2646  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2647 
2648  if (unlikely(!sess)) {
2649  uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
2650 
2651  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
2652  "qla_target(%d): Unable to find wwn login"
2653  " (s_id %x:%x:%x), trying to create it manually\n",
2654  vha->vp_idx, s_id[0], s_id[1], s_id[2]);
2655 
2656  if (atio->u.raw.entry_count > 1) {
2657  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
 2658  "Dropping multi-entry cmd %p\n", cmd);
2659  goto out_term;
2660  }
2661 
2662  mutex_lock(&ha->tgt.tgt_mutex);
2663  sess = qlt_make_local_sess(vha, s_id);
2664  /* sess has an extra creation ref. */
2665  mutex_unlock(&ha->tgt.tgt_mutex);
2666 
2667  if (!sess)
2668  goto out_term;
2669  }
2670 
2671  cmd->sess = sess;
2672  cmd->loop_id = sess->loop_id;
 2673  cmd->conf_compl_supported = sess->conf_compl_supported;
 2674 
2675  cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
2676  cmd->tag = atio->u.isp24.exchange_addr;
 2677  cmd->unpacked_lun = scsilun_to_int(
 2678  (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
2679 
2680  if (atio->u.isp24.fcp_cmnd.rddata &&
2681  atio->u.isp24.fcp_cmnd.wrdata) {
2682  bidi = 1;
2683  data_dir = DMA_TO_DEVICE;
2684  } else if (atio->u.isp24.fcp_cmnd.rddata)
2685  data_dir = DMA_FROM_DEVICE;
2686  else if (atio->u.isp24.fcp_cmnd.wrdata)
2687  data_dir = DMA_TO_DEVICE;
2688  else
2689  data_dir = DMA_NONE;
2690 
2691  fcp_task_attr = qlt_get_fcp_task_attr(vha,
2692  atio->u.isp24.fcp_cmnd.task_attr);
2693  data_length = be32_to_cpu(get_unaligned((uint32_t *)
2694  &atio->u.isp24.fcp_cmnd.add_cdb[
2695  atio->u.isp24.fcp_cmnd.add_cdb_len]));
2696 
2697  ql_dbg(ql_dbg_tgt, vha, 0xe022,
2698  "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
2699  cmd, cmd->unpacked_lun, cmd->tag);
2700 
2701  ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
2702  fcp_task_attr, data_dir, bidi);
2703  if (ret != 0)
2704  goto out_term;
2705  /*
 2706  * Drop the extra session reference taken in qlt_handle_cmd_for_atio().
2707  */
2708  ha->tgt.tgt_ops->put_sess(sess);
2709  return;
2710 
2711 out_term:
2712  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
2713  /*
 2714  * cmd has not been sent to the target yet, so pass NULL as the second
2715  * argument to qlt_send_term_exchange() and free the memory here.
2716  */
2717  spin_lock_irqsave(&ha->hardware_lock, flags);
2718  qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
2719  kmem_cache_free(qla_tgt_cmd_cachep, cmd);
2720  spin_unlock_irqrestore(&ha->hardware_lock, flags);
2721  if (sess)
2722  ha->tgt.tgt_ops->put_sess(sess);
2723 }
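 
 /*
  * The rddata/wrdata decoding above maps FCP_CMND flags to a DMA direction
  * as follows (a bidirectional command keeps DMA_TO_DEVICE for its initial
  * data-out phase, with bidi = 1):
  *
  *	rddata	wrdata	->	data_dir
  *	  1	  1		DMA_TO_DEVICE (bidi)
  *	  1	  0		DMA_FROM_DEVICE (read)
  *	  0	  1		DMA_TO_DEVICE (write)
  *	  0	  0		DMA_NONE
  */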
2724 
2725 /* ha->hardware_lock supposed to be held on entry */
2726 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
2727  struct atio_from_isp *atio)
2728 {
2729  struct qla_hw_data *ha = vha->hw;
2730  struct qla_tgt *tgt = ha->tgt.qla_tgt;
2731  struct qla_tgt_cmd *cmd;
2732 
2733  if (unlikely(tgt->tgt_stop)) {
2734  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
2735  "New command while device %p is shutting down\n", tgt);
2736  return -EFAULT;
2737  }
2738 
2739  cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
2740  if (!cmd) {
2741  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
2742  "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
2743  return -ENOMEM;
2744  }
2745 
2746  INIT_LIST_HEAD(&cmd->cmd_list);
2747 
2748  memcpy(&cmd->atio, atio, sizeof(*atio));
2749  cmd->state = QLA_TGT_STATE_NEW;
2750  cmd->tgt = ha->tgt.qla_tgt;
2751  cmd->vha = vha;
2752 
2753  INIT_WORK(&cmd->work, qlt_do_work);
2754  queue_work(qla_tgt_wq, &cmd->work);
2755  return 0;
2756 
2757 }
2758 
2759 /* ha->hardware_lock supposed to be held on entry */
2760 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
2761  int fn, void *iocb, int flags)
2762 {
2763  struct scsi_qla_host *vha = sess->vha;
2764  struct qla_hw_data *ha = vha->hw;
2765  struct qla_tgt_mgmt_cmd *mcmd;
2766  int res;
2767  uint8_t tmr_func;
2768 
2769  mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2770  if (!mcmd) {
2771  ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
2772  "qla_target(%d): Allocation of management "
2773  "command failed, some commands and their data could "
2774  "leak\n", vha->vp_idx);
2775  return -ENOMEM;
2776  }
2777  memset(mcmd, 0, sizeof(*mcmd));
2778  mcmd->sess = sess;
2779 
2780  if (iocb) {
2781  memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
2782  sizeof(mcmd->orig_iocb.imm_ntfy));
2783  }
2784  mcmd->tmr_func = fn;
2785  mcmd->flags = flags;
2786 
2787  switch (fn) {
2788  case QLA_TGT_CLEAR_ACA:
2789  ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
2790  "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
2791  tmr_func = TMR_CLEAR_ACA;
2792  break;
2793 
2794  case QLA_TGT_TARGET_RESET:
2795  ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
2796  "qla_target(%d): TARGET_RESET received\n",
2797  sess->vha->vp_idx);
2798  tmr_func = TMR_TARGET_WARM_RESET;
2799  break;
2800 
2801  case QLA_TGT_LUN_RESET:
2802  ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
2803  "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
2804  tmr_func = TMR_LUN_RESET;
2805  break;
2806 
2807  case QLA_TGT_CLEAR_TS:
2808  ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
2809  "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
2810  tmr_func = TMR_CLEAR_TASK_SET;
2811  break;
2812 
2813  case QLA_TGT_ABORT_TS:
2814  ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
2815  "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
2816  tmr_func = TMR_ABORT_TASK_SET;
2817  break;
2818 #if 0
2819  case QLA_TGT_ABORT_ALL:
2820  ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
2821  "qla_target(%d): Doing ABORT_ALL_TASKS\n",
2822  sess->vha->vp_idx);
2823  tmr_func = 0;
2824  break;
2825 
 2826  case QLA_TGT_ABORT_ALL_SESS:
 2827  ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
2828  "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
2829  sess->vha->vp_idx);
2830  tmr_func = 0;
2831  break;
2832 
 2832 
 2833  case QLA_TGT_NEXUS_LOSS_SESS:
 2834  ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
2835  "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
2836  sess->vha->vp_idx);
2837  tmr_func = 0;
2838  break;
2839 
2840  case QLA_TGT_NEXUS_LOSS:
2841  ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
2842  "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
2843  tmr_func = 0;
2844  break;
2845 #endif
2846  default:
2847  ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
2848  "qla_target(%d): Unknown task mgmt fn 0x%x\n",
2849  sess->vha->vp_idx, fn);
2850  mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2851  return -ENOSYS;
2852  }
2853 
2854  res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
2855  if (res != 0) {
2856  ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
2857  "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
2858  sess->vha->vp_idx, res);
2859  mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2860  return -EFAULT;
2861  }
2862 
2863  return 0;
2864 }
2865 
2866 /* ha->hardware_lock supposed to be held on entry */
2867 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
2868 {
2869  struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2870  struct qla_hw_data *ha = vha->hw;
2871  struct qla_tgt *tgt;
2872  struct qla_tgt_sess *sess;
2873  uint32_t lun, unpacked_lun;
2874  int lun_size, fn;
2875 
2876  tgt = ha->tgt.qla_tgt;
2877 
2878  lun = a->u.isp24.fcp_cmnd.lun;
2879  lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
2880  fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
2881  sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
2882  a->u.isp24.fcp_hdr.s_id);
2883  unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2884 
2885  if (!sess) {
2886  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
2887  "qla_target(%d): task mgmt fn 0x%x for "
 2888  "non-existent session\n", vha->vp_idx, fn);
2889  return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
2890  sizeof(struct atio_from_isp));
2891  }
2892 
2893  return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
2894 }
2895 
2896 /* ha->hardware_lock supposed to be held on entry */
2897 static int __qlt_abort_task(struct scsi_qla_host *vha,
2898  struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
2899 {
2900  struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2901  struct qla_hw_data *ha = vha->hw;
2902  struct qla_tgt_mgmt_cmd *mcmd;
2903  uint32_t lun, unpacked_lun;
2904  int rc;
2905 
2906  mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2907  if (mcmd == NULL) {
2908  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
2909  "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
2910  vha->vp_idx, __func__);
2911  return -ENOMEM;
2912  }
2913  memset(mcmd, 0, sizeof(*mcmd));
2914 
2915  mcmd->sess = sess;
2916  memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
2917  sizeof(mcmd->orig_iocb.imm_ntfy));
2918 
2919  lun = a->u.isp24.fcp_cmnd.lun;
2920  unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2921 
2922  rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
2923  le16_to_cpu(iocb->u.isp2x.seq_id));
2924  if (rc != 0) {
2925  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
2926  "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
2927  vha->vp_idx, rc);
2928  mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2929  return -EFAULT;
2930  }
2931 
2932  return 0;
2933 }
2934 
2935 /* ha->hardware_lock supposed to be held on entry */
2936 static int qlt_abort_task(struct scsi_qla_host *vha,
2937  struct imm_ntfy_from_isp *iocb)
2938 {
2939  struct qla_hw_data *ha = vha->hw;
2940  struct qla_tgt_sess *sess;
2941  int loop_id;
2942 
2943  loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
2944 
2945  sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
2946  if (sess == NULL) {
2947  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
 2948  "qla_target(%d): task abort for non-existent "
2949  "session\n", vha->vp_idx);
2950  return qlt_sched_sess_work(ha->tgt.qla_tgt,
2951  QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
2952  }
2953 
2954  return __qlt_abort_task(vha, iocb, sess);
2955 }
2956 
2957 /*
 2958  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2959  */
2960 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
2961  struct imm_ntfy_from_isp *iocb)
2962 {
2963  struct qla_hw_data *ha = vha->hw;
2964  int res = 0;
2965 
2966  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
2967  "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
2968  " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
2969  iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
2970  iocb->u.isp24.status_subcode);
2971 
2972  switch (iocb->u.isp24.status_subcode) {
2973  case ELS_PLOGI:
2974  case ELS_FLOGI:
2975  case ELS_PRLI:
2976  case ELS_LOGO:
2977  case ELS_PRLO:
2978  res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
2979  break;
2980  case ELS_PDISC:
2981  case ELS_ADISC:
2982  {
2983  struct qla_tgt *tgt = ha->tgt.qla_tgt;
2984  if (tgt->link_reinit_iocb_pending) {
2985  qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
2986  0, 0, 0, 0, 0, 0);
2987  tgt->link_reinit_iocb_pending = 0;
2988  }
2989  res = 1; /* send notify ack */
2990  break;
2991  }
2992 
2993  default:
2994  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
2995  "qla_target(%d): Unsupported ELS command %x "
2996  "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
2997  res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
2998  break;
2999  }
3000 
3001  return res;
3002 }
3003 
3004 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
3005 {
3006  struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
3007  size_t first_offset = 0, rem_offset = offset, tmp = 0;
3008  int i, sg_srr_cnt, bufflen = 0;
3009 
3010  ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
3011  "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
3012  "cmd->sg_cnt: %u, direction: %d\n",
3013  cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
3014 
3015  /*
 3016  * FIXME: Reject non-zero SRR relative offsets until we can test
 3017  * this code properly.
 3018  */
 3019  pr_debug("Rejecting non-zero SRR rel_offs: %u\n", offset);
3020  return -1;
3021 
3022  if (!cmd->sg || !cmd->sg_cnt) {
3023  ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
3024  "Missing cmd->sg or zero cmd->sg_cnt in"
3025  " qla_tgt_set_data_offset\n");
3026  return -EINVAL;
3027  }
3028  /*
3029  * Walk the current cmd->sg list until we locate the new sg_srr_start
3030  */
3031  for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
3032  ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
3033  "sg[%d]: %p page: %p, length: %d, offset: %d\n",
3034  i, sg, sg_page(sg), sg->length, sg->offset);
3035 
3036  if ((sg->length + tmp) > offset) {
3037  first_offset = rem_offset;
3038  sg_srr_start = sg;
3039  ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
3040  "Found matching sg[%d], using %p as sg_srr_start, "
3041  "and using first_offset: %zu\n", i, sg,
3042  first_offset);
3043  break;
3044  }
3045  tmp += sg->length;
3046  rem_offset -= sg->length;
3047  }
3048 
3049  if (!sg_srr_start) {
3050  ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
3051  "Unable to locate sg_srr_start for offset: %u\n", offset);
3052  return -EINVAL;
3053  }
3054  sg_srr_cnt = (cmd->sg_cnt - i);
3055 
3056  sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
3057  if (!sg_srr) {
3058  ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
3059  "Unable to allocate sgp\n");
3060  return -ENOMEM;
3061  }
3062  sg_init_table(sg_srr, sg_srr_cnt);
3063  sgp = &sg_srr[0];
3064  /*
3065  * Walk the remaining list for sg_srr_start, mapping to the newly
3066  * allocated sg_srr taking first_offset into account.
3067  */
3068  for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
3069  if (first_offset) {
3070  sg_set_page(sgp, sg_page(sg),
3071  (sg->length - first_offset), first_offset);
3072  first_offset = 0;
3073  } else {
3074  sg_set_page(sgp, sg_page(sg), sg->length, 0);
3075  }
3076  bufflen += sgp->length;
3077 
3078  sgp = sg_next(sgp);
3079  if (!sgp)
3080  break;
3081  }
3082 
3083  cmd->sg = sg_srr;
3084  cmd->sg_cnt = sg_srr_cnt;
3085  cmd->bufflen = bufflen;
3086  cmd->offset += offset;
3087  cmd->free_sg = 1;
3088 
3089  ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
3090  ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
3091  cmd->sg_cnt);
3092  ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
3093  cmd->bufflen);
3094  ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
3095  cmd->offset);
3096 
3097  if (cmd->sg_cnt < 0)
3098  BUG();
3099 
3100  if (cmd->bufflen < 0)
3101  BUG();
3102 
3103  return 0;
3104 }
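 
 /*
  * The first walk above follows a common scatterlist pattern: advance
  * through the mapped table until the element containing a given byte
  * offset is found. A minimal standalone sketch (assumed helper, not part
  * of the driver):
  */
 #if 0
 static struct scatterlist *example_sg_at_offset(struct scatterlist *sgl,
 	int nents, uint32_t offset, uint32_t *off_in_sg)
 {
 	struct scatterlist *sg;
 	int i;
 
 	for_each_sg(sgl, sg, nents, i) {
 		if (offset < sg->length) {
 			*off_in_sg = offset;	/* remainder inside this sg */
 			return sg;
 		}
 		offset -= sg->length;
 	}
 	return NULL;	/* offset lies past the end of the buffer */
 }
 #endif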
3105 
3106 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
3107  uint32_t srr_rel_offs, int *xmit_type)
3108 {
3109  int res = 0, rel_offs;
3110 
3111  rel_offs = srr_rel_offs - cmd->offset;
3112  ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
3113  srr_rel_offs, rel_offs);
3114 
3115  *xmit_type = QLA_TGT_XMIT_ALL;
3116 
3117  if (rel_offs < 0) {
3118  ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
3119  "qla_target(%d): SRR rel_offs (%d) < 0",
3120  cmd->vha->vp_idx, rel_offs);
3121  res = -1;
3122  } else if (rel_offs == cmd->bufflen)
3123  *xmit_type = QLA_TGT_XMIT_STATUS;
3124  else if (rel_offs > 0)
3125  res = qlt_set_data_offset(cmd, rel_offs);
3126 
3127  return res;
3128 }
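 
 /*
  * Worked example for the adjustment above (assumed values): for a command
  * with cmd->offset = 0 and cmd->bufflen = 8192, an SRR asking for relative
  * offset 8192 means "everything was received, resend only the status"
  * (QLA_TGT_XMIT_STATUS); offset 4096 trims the front half of the SGL via
  * qlt_set_data_offset(); anything below cmd->offset is an error.
  */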
3129 
3130 /* No locks, thread context */
3131 static void qlt_handle_srr(struct scsi_qla_host *vha,
3132  struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
3133 {
3134  struct imm_ntfy_from_isp *ntfy =
3135  (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
3136  struct qla_hw_data *ha = vha->hw;
3137  struct qla_tgt_cmd *cmd = sctio->cmd;
3138  struct se_cmd *se_cmd = &cmd->se_cmd;
3139  unsigned long flags;
3140  int xmit_type = 0, resp = 0;
3141  uint32_t offset;
3142  uint16_t srr_ui;
3143 
3144  offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
3145  srr_ui = ntfy->u.isp24.srr_ui;
3146 
3147  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
3148  cmd, srr_ui);
3149 
3150  switch (srr_ui) {
3151  case SRR_IU_STATUS:
3152  spin_lock_irqsave(&ha->hardware_lock, flags);
3153  qlt_send_notify_ack(vha, ntfy,
3154  0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3155  spin_unlock_irqrestore(&ha->hardware_lock, flags);
3156  xmit_type = QLA_TGT_XMIT_STATUS;
3157  resp = 1;
3158  break;
3159  case SRR_IU_DATA_IN:
3160  if (!cmd->sg || !cmd->sg_cnt) {
3161  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
3162  "Unable to process SRR_IU_DATA_IN due to"
3163  " missing cmd->sg, state: %d\n", cmd->state);
3164  dump_stack();
3165  goto out_reject;
3166  }
3167  if (se_cmd->scsi_status != 0) {
3168  ql_dbg(ql_dbg_tgt, vha, 0xe02a,
3169  "Rejecting SRR_IU_DATA_IN with non GOOD "
3170  "scsi_status\n");
3171  goto out_reject;
3172  }
3173  cmd->bufflen = se_cmd->data_length;
3174 
3175  if (qlt_has_data(cmd)) {
3176  if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3177  goto out_reject;
3178  spin_lock_irqsave(&ha->hardware_lock, flags);
3179  qlt_send_notify_ack(vha, ntfy,
3180  0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3181  spin_unlock_irqrestore(&ha->hardware_lock, flags);
3182  resp = 1;
3183  } else {
3184  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
3185  "qla_target(%d): SRR for in data for cmd "
3186  "without them (tag %d, SCSI status %d), "
3187  "reject", vha->vp_idx, cmd->tag,
3188  cmd->se_cmd.scsi_status);
3189  goto out_reject;
3190  }
3191  break;
3192  case SRR_IU_DATA_OUT:
3193  if (!cmd->sg || !cmd->sg_cnt) {
3194  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
3195  "Unable to process SRR_IU_DATA_OUT due to"
3196  " missing cmd->sg\n");
3197  dump_stack();
3198  goto out_reject;
3199  }
3200  if (se_cmd->scsi_status != 0) {
3201  ql_dbg(ql_dbg_tgt, vha, 0xe02b,
3202  "Rejecting SRR_IU_DATA_OUT"
3203  " with non GOOD scsi_status\n");
3204  goto out_reject;
3205  }
3206  cmd->bufflen = se_cmd->data_length;
3207 
3208  if (qlt_has_data(cmd)) {
3209  if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3210  goto out_reject;
3211  spin_lock_irqsave(&ha->hardware_lock, flags);
3212  qlt_send_notify_ack(vha, ntfy,
3213  0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3214  spin_unlock_irqrestore(&ha->hardware_lock, flags);
3215  if (xmit_type & QLA_TGT_XMIT_DATA)
3216  qlt_rdy_to_xfer(cmd);
3217  } else {
3218  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
3219  "qla_target(%d): SRR for out data for cmd "
3220  "without them (tag %d, SCSI status %d), "
3221  "reject", vha->vp_idx, cmd->tag,
3222  cmd->se_cmd.scsi_status);
3223  goto out_reject;
3224  }
3225  break;
3226  default:
3227  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
3228  "qla_target(%d): Unknown srr_ui value %x",
3229  vha->vp_idx, srr_ui);
3230  goto out_reject;
3231  }
3232 
 3233  /* Transmit the response for the status and data-in cases */
3234  if (resp)
3235  qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
3236 
3237  return;
3238 
3239 out_reject:
3240  spin_lock_irqsave(&ha->hardware_lock, flags);
3241  qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
 3242  NOTIFY_ACK_SRR_FLAGS_REJECT,
 3243  NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
 3244  NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
 3245  if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
 3246  cmd->state = QLA_TGT_STATE_DATA_IN;
 3247  dump_stack();
3248  } else
3249  qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
3250  spin_unlock_irqrestore(&ha->hardware_lock, flags);
3251 }
3252 
3253 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
3254  struct qla_tgt_srr_imm *imm, int ha_locked)
3255 {
3256  struct qla_hw_data *ha = vha->hw;
3257  unsigned long flags = 0;
3258 
3259  if (!ha_locked)
3260  spin_lock_irqsave(&ha->hardware_lock, flags);
3261 
3262  qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
 3263  NOTIFY_ACK_SRR_FLAGS_REJECT,
 3264  NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
 3265  NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
 3266 
3267  if (!ha_locked)
3268  spin_unlock_irqrestore(&ha->hardware_lock, flags);
3269 
3270  kfree(imm);
3271 }
3272 
3273 static void qlt_handle_srr_work(struct work_struct *work)
3274 {
3275  struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
3276  struct scsi_qla_host *vha = tgt->vha;
3277  struct qla_tgt_srr_ctio *sctio;
3278  unsigned long flags;
3279 
3280  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
3281  tgt);
3282 
3283 restart:
3284  spin_lock_irqsave(&tgt->srr_lock, flags);
 3285  list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
 3286  struct qla_tgt_srr_imm *imm, *i, *ti;
3287  struct qla_tgt_cmd *cmd;
3288  struct se_cmd *se_cmd;
3289 
3290  imm = NULL;
 3291  list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
 3292  srr_list_entry) {
3293  if (i->srr_id == sctio->srr_id) {
3294  list_del(&i->srr_list_entry);
3295  if (imm) {
3296  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
3297  "qla_target(%d): There must be "
3298  "only one IMM SRR per CTIO SRR "
3299  "(IMM SRR %p, id %d, CTIO %p\n",
3300  vha->vp_idx, i, i->srr_id, sctio);
3301  qlt_reject_free_srr_imm(tgt->vha, i, 0);
3302  } else
3303  imm = i;
3304  }
3305  }
3306 
3307  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
3308  "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
3309  sctio->srr_id);
3310 
3311  if (imm == NULL) {
3312  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
3313  "Not found matching IMM for SRR CTIO (id %d)\n",
3314  sctio->srr_id);
3315  continue;
3316  } else
3317  list_del(&sctio->srr_list_entry);
3318 
3319  spin_unlock_irqrestore(&tgt->srr_lock, flags);
3320 
3321  cmd = sctio->cmd;
3322  /*
3323  * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
3324  * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
3325  * logic..
3326  */
3327  cmd->offset = 0;
3328  if (cmd->free_sg) {
3329  kfree(cmd->sg);
3330  cmd->sg = NULL;
3331  cmd->free_sg = 0;
3332  }
3333  se_cmd = &cmd->se_cmd;
3334 
3335  cmd->sg_cnt = se_cmd->t_data_nents;
3336  cmd->sg = se_cmd->t_data_sg;
3337 
3338  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
3339  "SRR cmd %p (se_cmd %p, tag %d, op %x), "
3340  "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
3341  se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
3342 
3343  qlt_handle_srr(vha, sctio, imm);
3344 
3345  kfree(imm);
3346  kfree(sctio);
3347  goto restart;
3348  }
3349  spin_unlock_irqrestore(&tgt->srr_lock, flags);
3350 }
3351 
3352 /* ha->hardware_lock supposed to be held on entry */
3353 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
3354  struct imm_ntfy_from_isp *iocb)
3355 {
3356  struct qla_tgt_srr_imm *imm;
3357  struct qla_hw_data *ha = vha->hw;
3358  struct qla_tgt *tgt = ha->tgt.qla_tgt;
3359  struct qla_tgt_srr_ctio *sctio;
3360 
3361  tgt->imm_srr_id++;
3362 
3363  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
3364  vha->vp_idx);
3365 
3366  imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
3367  if (imm != NULL) {
3368  memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
3369 
3370  /* IRQ is already OFF */
3371  spin_lock(&tgt->srr_lock);
3372  imm->srr_id = tgt->imm_srr_id;
 3373  list_add_tail(&imm->srr_list_entry,
 3374  &tgt->srr_imm_list);
3375  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
3376  "IMM NTFY SRR %p added (id %d, ui %x)\n",
3377  imm, imm->srr_id, iocb->u.isp24.srr_ui);
3378  if (tgt->imm_srr_id == tgt->ctio_srr_id) {
3379  int found = 0;
3380  list_for_each_entry(sctio, &tgt->srr_ctio_list,
3381  srr_list_entry) {
3382  if (sctio->srr_id == imm->srr_id) {
3383  found = 1;
3384  break;
3385  }
3386  }
3387  if (found) {
3388  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
3389  "Scheduling srr work\n");
3390  schedule_work(&tgt->srr_work);
3391  } else {
3392  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
3393  "qla_target(%d): imm_srr_id "
3394  "== ctio_srr_id (%d), but there is no "
3395  "corresponding SRR CTIO, deleting IMM "
3396  "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
3397  imm);
3398  list_del(&imm->srr_list_entry);
3399 
3400  kfree(imm);
3401 
3402  spin_unlock(&tgt->srr_lock);
3403  goto out_reject;
3404  }
3405  }
3406  spin_unlock(&tgt->srr_lock);
3407  } else {
3408  struct qla_tgt_srr_ctio *ts;
3409 
3410  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
3411  "qla_target(%d): Unable to allocate SRR IMM "
3412  "entry, SRR request will be rejected\n", vha->vp_idx);
3413 
3414  /* IRQ is already OFF */
3415  spin_lock(&tgt->srr_lock);
3416  list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
3417  srr_list_entry) {
3418  if (sctio->srr_id == tgt->imm_srr_id) {
3419  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
3420  "CTIO SRR %p deleted (id %d)\n",
3421  sctio, sctio->srr_id);
3422  list_del(&sctio->srr_list_entry);
3423  qlt_send_term_exchange(vha, sctio->cmd,
3424  &sctio->cmd->atio, 1);
3425  kfree(sctio);
3426  }
3427  }
3428  spin_unlock(&tgt->srr_lock);
3429  goto out_reject;
3430  }
3431 
3432  return;
3433 
3434 out_reject:
3435  qlt_send_notify_ack(vha, iocb, 0, 0, 0,
 3436  NOTIFY_ACK_SRR_FLAGS_REJECT,
 3437  NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
 3438  NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
 3439 }
3440 
3441 /*
 3442  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3443  */
3444 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
3445  struct imm_ntfy_from_isp *iocb)
3446 {
3447  struct qla_hw_data *ha = vha->hw;
3448  uint32_t add_flags = 0;
3449  int send_notify_ack = 1;
3450  uint16_t status;
3451 
3452  status = le16_to_cpu(iocb->u.isp2x.status);
3453  switch (status) {
3454  case IMM_NTFY_LIP_RESET:
3455  {
3456  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
3457  "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
3458  vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
3459  iocb->u.isp24.status_subcode);
3460 
3461  if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
3462  send_notify_ack = 0;
3463  break;
3464  }
3465 
 3466  case IMM_NTFY_LIP_LINK_REINIT:
 3467  {
3468  struct qla_tgt *tgt = ha->tgt.qla_tgt;
3469  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
3470  "qla_target(%d): LINK REINIT (loop %#x, "
3471  "subcode %x)\n", vha->vp_idx,
3472  le16_to_cpu(iocb->u.isp24.nport_handle),
3473  iocb->u.isp24.status_subcode);
3474  if (tgt->link_reinit_iocb_pending) {
3475  qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
3476  0, 0, 0, 0, 0, 0);
3477  }
3478  memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
3479  tgt->link_reinit_iocb_pending = 1;
3480  /*
 3481  * QLogic requires waiting after LINK REINIT for possible
3482  * PDISC or ADISC ELS commands
3483  */
3484  send_notify_ack = 0;
3485  break;
3486  }
3487 
3488  case IMM_NTFY_PORT_LOGOUT:
3489  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
3490  "qla_target(%d): Port logout (loop "
3491  "%#x, subcode %x)\n", vha->vp_idx,
3492  le16_to_cpu(iocb->u.isp24.nport_handle),
3493  iocb->u.isp24.status_subcode);
3494 
3495  if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
3496  send_notify_ack = 0;
3497  /* The sessions will be cleared in the callback, if needed */
3498  break;
3499 
3500  case IMM_NTFY_GLBL_TPRLO:
3501  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
3502  "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
3503  if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
3504  send_notify_ack = 0;
3505  /* The sessions will be cleared in the callback, if needed */
3506  break;
3507 
3508  case IMM_NTFY_PORT_CONFIG:
3509  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
3510  "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
3511  status);
3512  if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
3513  send_notify_ack = 0;
3514  /* The sessions will be cleared in the callback, if needed */
3515  break;
3516 
3517  case IMM_NTFY_GLBL_LOGO:
3518  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
3519  "qla_target(%d): Link failure detected\n",
3520  vha->vp_idx);
3521  /* I_T nexus loss */
3522  if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
3523  send_notify_ack = 0;
3524  break;
3525 
 3526  case IMM_NTFY_IOCB_OVERFLOW:
 3527  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
3528  "qla_target(%d): Cannot provide requested "
3529  "capability (IOCB overflowed the immediate notify "
3530  "resource count)\n", vha->vp_idx);
3531  break;
3532 
3533  case IMM_NTFY_ABORT_TASK:
3534  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
3535  "qla_target(%d): Abort Task (S %08x I %#x -> "
3536  "L %#x)\n", vha->vp_idx,
3537  le16_to_cpu(iocb->u.isp2x.seq_id),
3538  GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
3539  le16_to_cpu(iocb->u.isp2x.lun));
3540  if (qlt_abort_task(vha, iocb) == 0)
3541  send_notify_ack = 0;
3542  break;
3543 
3544  case IMM_NTFY_RESOURCE:
3545  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
3546  "qla_target(%d): Out of resources, host %ld\n",
3547  vha->vp_idx, vha->host_no);
3548  break;
3549 
3550  case IMM_NTFY_MSG_RX:
3551  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
3552  "qla_target(%d): Immediate notify task %x\n",
3553  vha->vp_idx, iocb->u.isp2x.task_flags);
3554  if (qlt_handle_task_mgmt(vha, iocb) == 0)
3555  send_notify_ack = 0;
3556  break;
3557 
3558  case IMM_NTFY_ELS:
3559  if (qlt_24xx_handle_els(vha, iocb) == 0)
3560  send_notify_ack = 0;
3561  break;
3562 
3563  case IMM_NTFY_SRR:
3564  qlt_prepare_srr_imm(vha, iocb);
3565  send_notify_ack = 0;
3566  break;
3567 
3568  default:
3569  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
3570  "qla_target(%d): Received unknown immediate "
3571  "notify status %x\n", vha->vp_idx, status);
3572  break;
3573  }
3574 
3575  if (send_notify_ack)
3576  qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
3577 }
3578 
3579 /*
 3580  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3581  * This function sends busy to ISP 2xxx or 24xx.
3582  */
3583 static void qlt_send_busy(struct scsi_qla_host *vha,
3584  struct atio_from_isp *atio, uint16_t status)
3585 {
3586  struct ctio7_to_24xx *ctio24;
3587  struct qla_hw_data *ha = vha->hw;
3588  request_t *pkt;
3589  struct qla_tgt_sess *sess = NULL;
3590 
3591  sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
3592  atio->u.isp24.fcp_hdr.s_id);
3593  if (!sess) {
3594  qlt_send_term_exchange(vha, NULL, atio, 1);
3595  return;
3596  }
 3597  /* Sending a marker isn't necessary, since we're called from the ISR */
3598 
3599  pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3600  if (!pkt) {
3601  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
3602  "qla_target(%d): %s failed: unable to allocate "
3603  "request packet", vha->vp_idx, __func__);
3604  return;
3605  }
3606 
3607  pkt->entry_count = 1;
 3608  pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
 3609 
3610  ctio24 = (struct ctio7_to_24xx *)pkt;
3611  ctio24->entry_type = CTIO_TYPE7;
3612  ctio24->nport_handle = sess->loop_id;
 3613  ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
 3614  ctio24->vp_index = vha->vp_idx;
3615  ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
3616  ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3617  ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3618  ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3619  ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
 3620  __constant_cpu_to_le16(
 3621  CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
 3622  CTIO7_FLAGS_DONT_RET_CTIO);
 3623  /*
 3624  * A CTIO from the fw w/o an se_cmd doesn't provide enough info to
 3625  * retry it if explicit confirmation is used.
3626  */
3627  ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
3628  ctio24->u.status1.scsi_status = cpu_to_le16(status);
3629  ctio24->u.status1.residual = get_unaligned((uint32_t *)
3630  &atio->u.isp24.fcp_cmnd.add_cdb[
3631  atio->u.isp24.fcp_cmnd.add_cdb_len]);
3632  if (ctio24->u.status1.residual != 0)
3633  ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
3634 
3635  qla2x00_start_iocbs(vha, vha->req);
3636 }
3637 
3638 /* ha->hardware_lock supposed to be held on entry */
3639 /* called via callback from qla2xxx */
3640 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
3641  struct atio_from_isp *atio)
3642 {
3643  struct qla_hw_data *ha = vha->hw;
3644  struct qla_tgt *tgt = ha->tgt.qla_tgt;
3645  int rc;
3646 
3647  if (unlikely(tgt == NULL)) {
3648  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
3649  "ATIO pkt, but no tgt (ha %p)", ha);
3650  return;
3651  }
3652  ql_dbg(ql_dbg_tgt, vha, 0xe02c,
3653  "qla_target(%d): ATIO pkt %p: type %02x count %02x",
3654  vha->vp_idx, atio, atio->u.raw.entry_type,
3655  atio->u.raw.entry_count);
3656  /*
 3657  * In tgt_stop mode we should also allow all requests to pass.
 3658  * Otherwise, some commands can get stuck.
3659  */
3660 
3661  tgt->irq_cmd_count++;
3662 
3663  switch (atio->u.raw.entry_type) {
3664  case ATIO_TYPE7:
3665  ql_dbg(ql_dbg_tgt, vha, 0xe02d,
3666  "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
3667  "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
3668  vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
3669  atio->u.isp24.fcp_cmnd.rddata,
3670  atio->u.isp24.fcp_cmnd.wrdata,
3671  atio->u.isp24.fcp_cmnd.add_cdb_len,
 3672  be32_to_cpu(get_unaligned((uint32_t *)
 3673  &atio->u.isp24.fcp_cmnd.add_cdb[
3674  atio->u.isp24.fcp_cmnd.add_cdb_len])),
3675  atio->u.isp24.fcp_hdr.s_id[0],
3676  atio->u.isp24.fcp_hdr.s_id[1],
3677  atio->u.isp24.fcp_hdr.s_id[2]);
3678 
3679  if (unlikely(atio->u.isp24.exchange_addr ==
 3680  ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
 3681  ql_dbg(ql_dbg_tgt, vha, 0xe058,
3682  "qla_target(%d): ATIO_TYPE7 "
3683  "received with UNKNOWN exchange address, "
3684  "sending QUEUE_FULL\n", vha->vp_idx);
3685  qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
3686  break;
3687  }
3688  if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
3689  rc = qlt_handle_cmd_for_atio(vha, atio);
3690  else
3691  rc = qlt_handle_task_mgmt(vha, atio);
3692  if (unlikely(rc != 0)) {
3693  if (rc == -ESRCH) {
3694 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
3695  qlt_send_busy(vha, atio, SAM_STAT_BUSY);
3696 #else
3697  qlt_send_term_exchange(vha, NULL, atio, 1);
3698 #endif
3699  } else {
3700  if (tgt->tgt_stop) {
3701  ql_dbg(ql_dbg_tgt, vha, 0xe059,
3702  "qla_target: Unable to send "
3703  "command to target for req, "
3704  "ignoring.\n");
3705  } else {
3706  ql_dbg(ql_dbg_tgt, vha, 0xe05a,
3707  "qla_target(%d): Unable to send "
3708  "command to target, sending BUSY "
3709  "status.\n", vha->vp_idx);
3710  qlt_send_busy(vha, atio, SAM_STAT_BUSY);
3711  }
3712  }
3713  }
3714  break;
3715 
3716  case IMMED_NOTIFY_TYPE:
3717  {
3718  if (unlikely(atio->u.isp2x.entry_status != 0)) {
3719  ql_dbg(ql_dbg_tgt, vha, 0xe05b,
3720  "qla_target(%d): Received ATIO packet %x "
3721  "with error status %x\n", vha->vp_idx,
3722  atio->u.raw.entry_type,
3723  atio->u.isp2x.entry_status);
3724  break;
3725  }
3726  ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
3727  qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
3728  break;
3729  }
3730 
3731  default:
3732  ql_dbg(ql_dbg_tgt, vha, 0xe05c,
3733  "qla_target(%d): Received unknown ATIO atio "
3734  "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
3735  break;
3736  }
3737 
3738  tgt->irq_cmd_count--;
3739 }
3740 
3741 /* ha->hardware_lock supposed to be held on entry */
3742 /* called via callback from qla2xxx */
3743 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
3744 {
3745  struct qla_hw_data *ha = vha->hw;
3746  struct qla_tgt *tgt = ha->tgt.qla_tgt;
3747 
3748  if (unlikely(tgt == NULL)) {
3749  ql_dbg(ql_dbg_tgt, vha, 0xe05d,
3750  "qla_target(%d): Response pkt %x received, but no "
3751  "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
3752  return;
3753  }
3754 
3755  ql_dbg(ql_dbg_tgt, vha, 0xe02f,
3756  "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
3757  "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
3758  pkt->entry_count, pkt->entry_status, pkt->handle);
3759 
3760  /*
3761  * In tgt_stop mode we should also allow all requests to pass.
3762  * Otherwise, some commands can get stuck.
3763  */
3764 
3765  tgt->irq_cmd_count++;
3766 
3767  switch (pkt->entry_type) {
3768  case CTIO_TYPE7:
3769  {
3770  struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
3771  ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
3772  vha->vp_idx);
3773  qlt_do_ctio_completion(vha, entry->handle,
3774  le16_to_cpu(entry->status)|(pkt->entry_status << 16),
3775  entry);
3776  break;
3777  }
3778 
3779  case ACCEPT_TGT_IO_TYPE:
3780  {
3781  struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
3782  int rc;
3783  ql_dbg(ql_dbg_tgt, vha, 0xe031,
3784  "ACCEPT_TGT_IO instance %d status %04x "
3785  "lun %04x read/write %d data_length %04x "
3786  "target_id %02x rx_id %04x\n", vha->vp_idx,
3787  le16_to_cpu(atio->u.isp2x.status),
3788  le16_to_cpu(atio->u.isp2x.lun),
3789  atio->u.isp2x.execution_codes,
3790  le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
3791  atio), atio->u.isp2x.rx_id);
3792  if (atio->u.isp2x.status !=
3793  __constant_cpu_to_le16(ATIO_CDB_VALID)) {
3794  ql_dbg(ql_dbg_tgt, vha, 0xe05e,
3795  "qla_target(%d): ATIO with error "
3796  "status %x received\n", vha->vp_idx,
3797  le16_to_cpu(atio->u.isp2x.status));
3798  break;
3799  }
3800  ql_dbg(ql_dbg_tgt, vha, 0xe032,
3801  "FCP CDB: 0x%02x, sizeof(cdb): %lu",
3802  atio->u.isp2x.cdb[0], (unsigned long
3803  int)sizeof(atio->u.isp2x.cdb));
3804 
3805  rc = qlt_handle_cmd_for_atio(vha, atio);
3806  if (unlikely(rc != 0)) {
3807  if (rc == -ESRCH) {
3808 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
3809  qlt_send_busy(vha, atio, 0);
3810 #else
3811  qlt_send_term_exchange(vha, NULL, atio, 1);
3812 #endif
3813  } else {
3814  if (tgt->tgt_stop) {
3815  ql_dbg(ql_dbg_tgt, vha, 0xe05f,
3816  "qla_target: Unable to send "
3817  "command to target, sending TERM "
3818  "EXCHANGE for rsp\n");
3819  qlt_send_term_exchange(vha, NULL,
3820  atio, 1);
3821  } else {
3822  ql_dbg(ql_dbg_tgt, vha, 0xe060,
3823  "qla_target(%d): Unable to send "
3824  "command to target, sending BUSY "
3825  "status\n", vha->vp_idx);
3826  qlt_send_busy(vha, atio, 0);
3827  }
3828  }
3829  }
3830  }
3831  break;
3832 
3833  case CONTINUE_TGT_IO_TYPE:
3834  {
3835  struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
3836  ql_dbg(ql_dbg_tgt, vha, 0xe033,
3837  "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
3838  qlt_do_ctio_completion(vha, entry->handle,
3839  le16_to_cpu(entry->status)|(pkt->entry_status << 16),
3840  entry);
3841  break;
3842  }
3843 
3844  case CTIO_A64_TYPE:
3845  {
3846  struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
3847  ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
3848  vha->vp_idx);
3849  qlt_do_ctio_completion(vha, entry->handle,
3850  le16_to_cpu(entry->status)|(pkt->entry_status << 16),
3851  entry);
3852  break;
3853  }
3854 
3855  case IMMED_NOTIFY_TYPE:
3856  ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
3857  qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
3858  break;
3859 
3860  case NOTIFY_ACK_TYPE:
3861  if (tgt->notify_ack_expected > 0) {
3862  struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
3863  ql_dbg(ql_dbg_tgt, vha, 0xe036,
3864  "NOTIFY_ACK seq %08x status %x\n",
3865  le16_to_cpu(entry->u.isp2x.seq_id),
3866  le16_to_cpu(entry->u.isp2x.status));
3867  tgt->notify_ack_expected--;
3868  if (entry->u.isp2x.status !=
3869  __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
3870  ql_dbg(ql_dbg_tgt, vha, 0xe061,
3871  "qla_target(%d): NOTIFY_ACK "
3872  "failed %x\n", vha->vp_idx,
3873  le16_to_cpu(entry->u.isp2x.status));
3874  }
3875  } else {
3876  ql_dbg(ql_dbg_tgt, vha, 0xe062,
3877  "qla_target(%d): Unexpected NOTIFY_ACK received\n",
3878  vha->vp_idx);
3879  }
3880  break;
3881 
3882  case ABTS_RECV_24XX:
3883  ql_dbg(ql_dbg_tgt, vha, 0xe037,
3884  "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
3885  qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
3886  break;
3887 
3888  case ABTS_RESP_24XX:
3889  if (tgt->abts_resp_expected > 0) {
3890  struct abts_resp_from_24xx_fw *entry =
3891  (struct abts_resp_from_24xx_fw *)pkt;
3892  ql_dbg(ql_dbg_tgt, vha, 0xe038,
3893  "ABTS_RESP_24XX: compl_status %x\n",
3894  entry->compl_status);
3895  tgt->abts_resp_expected--;
3896  if (le16_to_cpu(entry->compl_status) !=
3897  ABTS_RESP_COMPL_SUCCESS) {
3898  if ((entry->error_subcode1 == 0x1E) &&
3899  (entry->error_subcode2 == 0)) {
3900  /*
3901  * We've got a race here: the aborted
3902  * exchange was not terminated, i.e. a
3903  * response for the aborted command was
3904  * sent between the time the abort
3905  * request was received and the time it
3906  * was processed.
3907  * Unfortunately, the firmware requires
3908  * that all aborted exchanges be
3909  * explicitly terminated, otherwise it
3910  * refuses to send responses for the
3911  * abort requests. So, we have to
3912  * (re)terminate the exchange and retry
3913  * the abort response.
3914  */
3915  qlt_24xx_retry_term_exchange(vha,
3916  entry);
3917  } else
3918  ql_dbg(ql_dbg_tgt, vha, 0xe063,
3919  "qla_target(%d): ABTS_RESP_24XX "
3920  "failed %x (subcode %x:%x)",
3921  vha->vp_idx, entry->compl_status,
3922  entry->error_subcode1,
3923  entry->error_subcode2);
3924  }
3925  } else {
3926  ql_dbg(ql_dbg_tgt, vha, 0xe064,
3927  "qla_target(%d): Unexpected ABTS_RESP_24XX "
3928  "received\n", vha->vp_idx);
3929  }
3930  break;
3931 
3932  default:
3933  ql_dbg(ql_dbg_tgt, vha, 0xe065,
3934  "qla_target(%d): Received unknown response pkt "
3935  "type %x\n", vha->vp_idx, pkt->entry_type);
3936  break;
3937  }
3938 
3939  tgt->irq_cmd_count--;
3940 }
3941 
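/*
 * On the ABTS_RESP_24XX arm above: error subcodes 0x1E/0 mean the aborted
 * exchange was still open when the abort completed, so the exchange has to
 * be terminated again before the abort response can be retried. A sketch of
 * that recovery sequence, illustrative only; qlt_24xx_retry_term_exchange()
 * is the real entry point:
 */
#if 0
static void abts_resp_race_recovery(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	/* 1) terminate the exchange the firmware still considers open */
	qlt_24xx_retry_term_exchange(vha, entry);
	/* 2) the termination completion path then re-sends the ABTS
	 *    response; the firmware only completes abort requests once
	 *    the exchange has been explicitly terminated */
}
#endif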
3942 /*
3943  * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
3944  */
3945 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
3946  uint16_t *mailbox)
3947 {
3948  struct qla_hw_data *ha = vha->hw;
3949  struct qla_tgt *tgt = ha->tgt.qla_tgt;
3950  int login_code;
3951 
3952  ql_dbg(ql_dbg_tgt, vha, 0xe039,
3953  "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
3954  vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
3955  ha->operating_mode, ha->current_topology);
3956 
3957  if (!ha->tgt.tgt_ops)
3958  return;
3959 
3960  if (unlikely(tgt == NULL)) {
3961  ql_dbg(ql_dbg_tgt, vha, 0xe03a,
3962  "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
3963  return;
3964  }
3965 
3966  if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
3967  IS_QLA2100(ha))
3968  return;
3969  /*
3970  * In tgt_stop mode we should also allow all requests to pass.
3971  * Otherwise, some commands can get stuck.
3972  */
3973 
3974  tgt->irq_cmd_count++;
3975 
3976  switch (code) {
3977  case MBA_RESET: /* Reset */
3978  case MBA_SYSTEM_ERR: /* System Error */
3979  case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
3980  case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
3981  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
3982  "qla_target(%d): System error async event %#x "
3983  "occurred", vha->vp_idx, code);
3984  break;
3985  case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
3986  set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3987  break;
3988 
3989  case MBA_LOOP_UP:
3990  {
3991  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
3992  "qla_target(%d): Async LOOP_UP occurred "
3993  "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
3994  le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
3995  le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
3996  if (tgt->link_reinit_iocb_pending) {
3997  qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
3998  0, 0, 0, 0, 0, 0);
3999  tgt->link_reinit_iocb_pending = 0;
4000  }
4001  break;
4002  }
4003 
4004  case MBA_LIP_OCCURRED:
4005  case MBA_LOOP_DOWN:
4006  case MBA_LIP_RESET:
4007  case MBA_RSCN_UPDATE:
4008  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
4009  "qla_target(%d): Async event %#x occurred "
4010  "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
4011  le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4012  le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4013  break;
4014 
4015  case MBA_PORT_UPDATE:
4016  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
4017  "qla_target(%d): Port update async event %#x "
4018  "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
4019  "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
4020  le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4021  le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4022 
4023  login_code = le16_to_cpu(mailbox[2]);
4024  if (login_code == 0x4)
4025  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
4026  "Async MB 2: Got PLOGI Complete\n");
4027  else if (login_code == 0x7)
4028  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
4029  "Async MB 2: Port Logged Out\n");
4030  break;
4031 
4032  default:
4033  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
4034  "qla_target(%d): Async event %#x occurred: "
4035  "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
4036  code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4037  le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4038  break;
4039  }
4040 
4041  tgt->irq_cmd_count--;
4042 }
4043 
4044 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4045  uint16_t loop_id)
4046 {
4047  fc_port_t *fcport;
4048  int rc;
4049 
4050  fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
4051  if (!fcport) {
4052  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
4053  "qla_target(%d): Allocation of tmp FC port failed",
4054  vha->vp_idx);
4055  return NULL;
4056  }
4057 
4058  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
4059 
4060  fcport->loop_id = loop_id;
4061 
4062  rc = qla2x00_get_port_database(vha, fcport, 0);
4063  if (rc != QLA_SUCCESS) {
4064  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
4065  "qla_target(%d): Failed to retrieve fcport "
4066  "information -- get_port_database() returned %x "
4067  "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
4068  kfree(fcport);
4069  return NULL;
4070  }
4071 
4072  return fcport;
4073 }
4074 
4075 /* Must be called under tgt_mutex */
4076 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
4077  uint8_t *s_id)
4078 {
4079  struct qla_hw_data *ha = vha->hw;
4080  struct qla_tgt_sess *sess = NULL;
4081  fc_port_t *fcport = NULL;
4082  int rc, global_resets;
4083  uint16_t loop_id = 0;
4084 
4085 retry:
4086  global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
4087 
4088  rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
4089  if (rc != 0) {
4090  if ((s_id[0] == 0xFF) &&
4091  (s_id[1] == 0xFC)) {
4092  /*
4093  * This is the Domain Controller, so it should be
4094  * OK to drop SCSI commands from it.
4095  */
4096  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
4097  "Unable to find initiator with S_ID %x:%x:%x",
4098  s_id[0], s_id[1], s_id[2]);
4099  } else
4100  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
4101  "qla_target(%d): Unable to find "
4102  "initiator with S_ID %x:%x:%x",
4103  vha->vp_idx, s_id[0], s_id[1],
4104  s_id[2]);
4105  return NULL;
4106  }
4107 
4108  fcport = qlt_get_port_database(vha, loop_id);
4109  if (!fcport)
4110  return NULL;
4111 
4112  if (global_resets !=
4113  atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
4114  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
4115  "qla_target(%d): global reset during session discovery "
4116  "(counter was %d, new %d), retrying", vha->vp_idx,
4117  global_resets,
4118  atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
4119  goto retry;
4120  }
4121 
4122  sess = qlt_create_sess(vha, fcport, true);
4123 
4124  kfree(fcport);
4125  return sess;
4126 }
4127 
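/*
 * qlt_make_local_sess() above uses an optimistic retry scheme: it samples
 * tgt_global_resets_count before the loop-id/port-database lookups and
 * retries from scratch if the counter moved, since a global reset
 * invalidates everything learned so far. The same pattern in isolation,
 * with generic names, for illustration:
 */
#if 0
static atomic_t global_resets;	/* bumped by the reset path */

static void *discover(void)
{
	int before;
	void *result;

	do {
		before = atomic_read(&global_resets);
		result = NULL;	/* ...redo lookups from scratch... */
	} while (before != atomic_read(&global_resets));

	return result;
}
#endif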
4128 static void qlt_abort_work(struct qla_tgt *tgt,
4129  struct qla_tgt_sess_work_param *prm)
4130 {
4131  struct scsi_qla_host *vha = tgt->vha;
4132  struct qla_hw_data *ha = vha->hw;
4133  struct qla_tgt_sess *sess = NULL;
4134  unsigned long flags;
4136  uint8_t s_id[3];
4137  int rc;
4138 
4139  spin_lock_irqsave(&ha->hardware_lock, flags);
4140 
4141  if (tgt->tgt_stop)
4142  goto out_term;
4143 
4144  s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
4145  s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
4146  s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
4147 
4148  sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4149  (unsigned char *)s_id);
4150  if (!sess) {
4151  spin_unlock_irqrestore(&ha->hardware_lock, flags);
4152 
4153  mutex_lock(&ha->tgt.tgt_mutex);
4154  sess = qlt_make_local_sess(vha, s_id);
4155  /* sess has got an extra creation ref */
4156  mutex_unlock(&ha->tgt.tgt_mutex);
4157 
4158  spin_lock_irqsave(&ha->hardware_lock, flags);
4159  if (!sess)
4160  goto out_term;
4161  } else {
4162  kref_get(&sess->se_sess->sess_kref);
4163  }
4164 
4165  if (tgt->tgt_stop)
4166  goto out_term;
4167 
4168  rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
4169  if (rc != 0)
4170  goto out_term;
4171  spin_unlock_irqrestore(&ha->hardware_lock, flags);
4172 
4173  ha->tgt.tgt_ops->put_sess(sess);
4174  return;
4175 
4176 out_term:
4177  qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
4178  spin_unlock_irqrestore(&ha->hardware_lock, flags);
4179  if (sess)
4180  ha->tgt.tgt_ops->put_sess(sess);
4181 }
4182 
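/*
 * qlt_abort_work() above reverses the s_id bytes because the ABTS IOCB
 * carries the S_ID in little-endian byte order, while the session lookup
 * wants the wire (big-endian) domain/area/al_pa order. The same swap as a
 * standalone sketch (helper name hypothetical):
 */
#if 0
static inline void qlt_sid_le_to_wire(uint8_t dst[3], const uint8_t src_le[3])
{
	dst[0] = src_le[2];	/* domain */
	dst[1] = src_le[1];	/* area */
	dst[2] = src_le[0];	/* al_pa */
}
#endif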
4183 static void qlt_tmr_work(struct qla_tgt *tgt,
4184  struct qla_tgt_sess_work_param *prm)
4185 {
4186  struct atio_from_isp *a = &prm->tm_iocb2;
4187  struct scsi_qla_host *vha = tgt->vha;
4188  struct qla_hw_data *ha = vha->hw;
4189  struct qla_tgt_sess *sess = NULL;
4190  unsigned long flags;
4191  uint8_t *s_id = NULL; /* to hide compiler warnings */
4192  int rc;
4193  uint32_t lun, unpacked_lun;
4194  int lun_size, fn;
4195  void *iocb;
4196 
4197  spin_lock_irqsave(&ha->hardware_lock, flags);
4198 
4199  if (tgt->tgt_stop)
4200  goto out_term;
4201 
4202  s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
4203  sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
4204  if (!sess) {
4205  spin_unlock_irqrestore(&ha->hardware_lock, flags);
4206 
4207  mutex_lock(&ha->tgt.tgt_mutex);
4208  sess = qlt_make_local_sess(vha, s_id);
4209  /* sess has got an extra creation ref */
4210  mutex_unlock(&ha->tgt.tgt_mutex);
4211 
4212  spin_lock_irqsave(&ha->hardware_lock, flags);
4213  if (!sess)
4214  goto out_term;
4215  } else {
4216  kref_get(&sess->se_sess->sess_kref);
4217  }
4218 
4219  iocb = a;
4220  lun = a->u.isp24.fcp_cmnd.lun;
4221  lun_size = sizeof(lun);
4222  fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4223  unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
4224 
4225  rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4226  if (rc != 0)
4227  goto out_term;
4228  spin_unlock_irqrestore(&ha->hardware_lock, flags);
4229 
4230  ha->tgt.tgt_ops->put_sess(sess);
4231  return;
4232 
4233 out_term:
4234  qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
4235  spin_unlock_irqrestore(&ha->hardware_lock, flags);
4236  if (sess)
4237  ha->tgt.tgt_ops->put_sess(sess);
4238 }
4239 
4240 static void qlt_sess_work_fn(struct work_struct *work)
4241 {
4242  struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
4243  struct scsi_qla_host *vha = tgt->vha;
4244  unsigned long flags;
4245 
4246  ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
4247 
4248  spin_lock_irqsave(&tgt->sess_work_lock, flags);
4249  while (!list_empty(&tgt->sess_works_list)) {
4250  struct qla_tgt_sess_work_param *prm = list_entry(
4251  tgt->sess_works_list.next, typeof(*prm),
4252  sess_works_list_entry);
4253 
4254  /*
4255  * This work can be scheduled on several CPUs at a time, so we
4256  * must delete the entry to eliminate double processing
4257  */
4258  list_del(&prm->sess_works_list_entry);
4259 
4260  spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
4261 
4262  switch (prm->type) {
4263  case QLA_TGT_SESS_WORK_ABORT:
4264  qlt_abort_work(tgt, prm);
4265  break;
4266  case QLA_TGT_SESS_WORK_TM:
4267  qlt_tmr_work(tgt, prm);
4268  break;
4269  default:
4270  BUG_ON(1);
4271  break;
4272  }
4273 
4274  spin_lock_irqsave(&tgt->sess_work_lock, flags);
4275 
4276  kfree(prm);
4277  }
4278  spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
4279 }
4280 
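/*
 * qlt_sess_work_fn() above uses the classic "pop under lock, process
 * unlocked" drain, which is what makes it safe to run on several CPUs at
 * once. The shape of that loop, reduced to its essence (types and names
 * generic, for illustration):
 */
#if 0
struct sess_work_item {
	struct list_head entry;
	/* ...payload... */
};

static void drain(spinlock_t *lock, struct list_head *queue)
{
	struct sess_work_item *item;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(queue)) {
		item = list_entry(queue->next, typeof(*item), entry);
		list_del(&item->entry);	/* claim it before unlocking */
		spin_unlock_irqrestore(lock, flags);
		/* process(item): may sleep or take other locks */
		spin_lock_irqsave(lock, flags);
		kfree(item);
	}
	spin_unlock_irqrestore(lock, flags);
}
#endif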
4281 /* Must be called under tgt_host_action_mutex */
4282 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
4283 {
4284  struct qla_tgt *tgt;
4285 
4286  if (!QLA_TGT_MODE_ENABLED())
4287  return 0;
4288 
4289  ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
4290  "Registering target for host %ld(%p)", base_vha->host_no, ha);
4291 
4292  BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
4293 
4294  tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
4295  if (!tgt) {
4296  ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
4297  "Unable to allocate struct qla_tgt\n");
4298  return -ENOMEM;
4299  }
4300 
4301  if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
4302  base_vha->host->hostt->supported_mode |= MODE_TARGET;
4303 
4304  tgt->ha = ha;
4305  tgt->vha = base_vha;
4306  init_waitqueue_head(&tgt->waitQ);
4307  INIT_LIST_HEAD(&tgt->sess_list);
4308  INIT_LIST_HEAD(&tgt->del_sess_list);
4309  INIT_DELAYED_WORK(&tgt->sess_del_work,
4310  (void (*)(struct work_struct *))qlt_del_sess_work_fn);
4311  spin_lock_init(&tgt->sess_work_lock);
4312  INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
4313  INIT_LIST_HEAD(&tgt->sess_works_list);
4314  spin_lock_init(&tgt->srr_lock);
4315  INIT_LIST_HEAD(&tgt->srr_ctio_list);
4316  INIT_LIST_HEAD(&tgt->srr_imm_list);
4317  INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
4318  atomic_set(&tgt->tgt_global_resets_count, 0);
4319 
4320  ha->tgt.qla_tgt = tgt;
4321 
4322  ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
4323  "qla_target(%d): using 64 Bit PCI addressing",
4324  base_vha->vp_idx);
4325  tgt->tgt_enable_64bit_addr = 1;
4326  /* 3 is reserved */
4327  tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
4328  tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
4329  tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
4330 
4331  mutex_lock(&qla_tgt_mutex);
4332  list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
4333  mutex_unlock(&qla_tgt_mutex);
4334 
4335  return 0;
4336 }
4337 
4338 /* Must be called under tgt_host_action_mutex */
4339 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
4340 {
4341  if (!ha->tgt.qla_tgt)
4342  return 0;
4343 
4344  mutex_lock(&qla_tgt_mutex);
4345  list_del(&ha->tgt.qla_tgt->tgt_list_entry);
4346  mutex_unlock(&qla_tgt_mutex);
4347 
4348  ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
4349  vha->host_no, ha);
4350  qlt_release(ha->tgt.qla_tgt);
4351 
4352  return 0;
4353 }
4354 
4355 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
4356  unsigned char *b)
4357 {
4358  int i;
4359 
4360  pr_debug("qla2xxx HW vha->node_name: ");
4361  for (i = 0; i < WWN_SIZE; i++)
4362  pr_debug("%02x ", vha->node_name[i]);
4363  pr_debug("\n");
4364  pr_debug("qla2xxx HW vha->port_name: ");
4365  for (i = 0; i < WWN_SIZE; i++)
4366  pr_debug("%02x ", vha->port_name[i]);
4367  pr_debug("\n");
4368 
4369  pr_debug("qla2xxx passed configfs WWPN: ");
4370  put_unaligned_be64(wwpn, b);
4371  for (i = 0; i < WWN_SIZE; i++)
4372  pr_debug("%02x ", b[i]);
4373  pr_debug("\n");
4374 }
4375 
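/*
 * qlt_lport_dump() relies on put_unaligned_be64() to serialize the
 * configfs-provided 64-bit WWPN into the same big-endian byte layout as
 * vha->port_name, which is what makes the memcmp() in qlt_lport_register()
 * below valid. A sketch (the WWPN value is made up for illustration):
 */
#if 0
static void wwpn_to_wire_example(void)
{
	u8 b[WWN_SIZE];

	put_unaligned_be64(0x21000024ff31a0b0ULL, b);
	/* b[] now holds 21 00 00 24 ff 31 a0 b0 -- the same big-endian
	 * layout as vha->port_name, so memcmp() compares like with like. */
}
#endif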
4376 /**
4377  * qlt_lport_register - register lport with external module
4378  *
4379  * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
4380  * @wwpn: Passed FC Target WWPN
4381  * @callback: lport initialization callback for tcm_qla2xxx code
4382  * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
4383  */
4384 int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
4385  int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
4386 {
4387  struct qla_tgt *tgt;
4388  struct scsi_qla_host *vha;
4389  struct qla_hw_data *ha;
4390  struct Scsi_Host *host;
4391  unsigned long flags;
4392  int rc;
4393  u8 b[WWN_SIZE];
4394 
4395  mutex_lock(&qla_tgt_mutex);
4396  list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
4397  vha = tgt->vha;
4398  ha = vha->hw;
4399 
4400  host = vha->host;
4401  if (!host)
4402  continue;
4403 
4404  if (ha->tgt.tgt_ops != NULL)
4405  continue;
4406 
4407  if (!(host->hostt->supported_mode & MODE_TARGET))
4408  continue;
4409 
4410  spin_lock_irqsave(&ha->hardware_lock, flags);
4411  if (host->active_mode & MODE_TARGET) {
4412  pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
4413  host->host_no);
4414  spin_unlock_irqrestore(&ha->hardware_lock, flags);
4415  continue;
4416  }
4417  spin_unlock_irqrestore(&ha->hardware_lock, flags);
4418 
4419  if (!scsi_host_get(host)) {
4420  ql_dbg(ql_dbg_tgt, vha, 0xe068,
4421  "Unable to scsi_host_get() for"
4422  " qla2xxx scsi_host\n");
4423  continue;
4424  }
4425  qlt_lport_dump(vha, wwpn, b);
4426 
4427  if (memcmp(vha->port_name, b, WWN_SIZE)) {
4428  scsi_host_put(host);
4429  continue;
4430  }
4431  /*
4432  * Setup passed parameters ahead of invoking callback
4433  */
4434  ha->tgt.tgt_ops = qla_tgt_ops;
4435  ha->tgt.target_lport_ptr = target_lport_ptr;
4436  rc = (*callback)(vha);
4437  if (rc != 0) {
4438  ha->tgt.tgt_ops = NULL;
4439  ha->tgt.target_lport_ptr = NULL;
4440  }
4441  mutex_unlock(&qla_tgt_mutex);
4442  return rc;
4443  }
4444  mutex_unlock(&qla_tgt_mutex);
4445 
4446  return -ENODEV;
4447 }
4448 EXPORT_SYMBOL(qlt_lport_register);
4449 
4450 /**
4451  * qlt_lport_deregister - deregister lport
4452  *
4453  * @vha: Registered scsi_qla_host pointer
4454  */
4455 void qlt_lport_deregister(struct scsi_qla_host *vha)
4456 {
4457  struct qla_hw_data *ha = vha->hw;
4458  struct Scsi_Host *sh = vha->host;
4459  /*
4460  * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
4461  */
4462  ha->tgt.target_lport_ptr = NULL;
4463  ha->tgt.tgt_ops = NULL;
4464  /*
4465  * Release the Scsi_Host reference for the underlying qla2xxx host
4466  */
4467  scsi_host_put(sh);
4468 }
4469 EXPORT_SYMBOL(qlt_lport_deregister);
4470 
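/*
 * qlt_lport_register()/qlt_lport_deregister() form the attach/detach
 * contract for an external fabric module (tcm_qla2xxx in mainline). A
 * caller sketch, with hypothetical "my_*" names for everything the fabric
 * module would supply:
 */
#if 0
static struct qla_tgt_func_tmpl my_qla_tgt_ops;

static int my_lport_callback(struct scsi_qla_host *vha)
{
	/* stash vha in the fabric module's lport; a nonzero return makes
	 * qlt_lport_register() undo the tgt_ops/target_lport_ptr setup */
	return 0;
}

static int my_attach(u64 wwpn, void *my_lport)
{
	return qlt_lport_register(&my_qla_tgt_ops, wwpn,
				  my_lport_callback, my_lport);
	/* ...later, on teardown: qlt_lport_deregister(vha); */
}
#endif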
4471 /* Must be called under HW lock */
4472 void qlt_set_mode(struct scsi_qla_host *vha)
4473 {
4474  struct qla_hw_data *ha = vha->hw;
4475 
4476  switch (ql2x_ini_mode) {
4477  case QLA2XXX_INI_MODE_DISABLED:
4478  case QLA2XXX_INI_MODE_EXCLUSIVE:
4479  vha->host->active_mode = MODE_TARGET;
4480  break;
4481  case QLA2XXX_INI_MODE_ENABLED:
4482  vha->host->active_mode |= MODE_TARGET;
4483  break;
4484  default:
4485  break;
4486  }
4487 
4488  if (ha->tgt.ini_mode_force_reverse)
4489  qla_reverse_ini_mode(vha);
4490 }
4491 
4492 /* Must be called under HW lock */
4493 void qlt_clear_mode(struct scsi_qla_host *vha)
4494 {
4495  struct qla_hw_data *ha = vha->hw;
4496 
4497  switch (ql2x_ini_mode) {
4498  case QLA2XXX_INI_MODE_DISABLED:
4499  vha->host->active_mode = MODE_UNKNOWN;
4500  break;
4501  case QLA2XXX_INI_MODE_EXCLUSIVE:
4502  vha->host->active_mode = MODE_INITIATOR;
4503  break;
4504  case QLA2XXX_INI_MODE_ENABLED:
4505  vha->host->active_mode &= ~MODE_TARGET;
4506  break;
4507  default:
4508  break;
4509  }
4510 
4511  if (ha->tgt.ini_mode_force_reverse)
4512  qla_reverse_ini_mode(vha);
4513 }
4514 
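/*
 * Taken together, qlt_set_mode() and qlt_clear_mode() implement the
 * qlini_mode policy as transitions of host->active_mode:
 *
 *   ql2x_ini_mode  | set (tgt enabled)  | clear (tgt disabled)
 *   ---------------+--------------------+---------------------
 *   "disabled"     | = MODE_TARGET      | = MODE_UNKNOWN
 *   "exclusive"    | = MODE_TARGET      | = MODE_INITIATOR
 *   "enabled"      | |= MODE_TARGET     | &= ~MODE_TARGET
 *
 * i.e. only "enabled" keeps initiator mode alive alongside target mode.
 */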
4515 /*
4516  * qlt_enable_vha - NO LOCK HELD
4517  *
4518  * host_reset, bring up w/ Target Mode Enabled
4519  */
4520 void
4521 qlt_enable_vha(struct scsi_qla_host *vha)
4522 {
4523  struct qla_hw_data *ha = vha->hw;
4524  struct qla_tgt *tgt = ha->tgt.qla_tgt;
4525  unsigned long flags;
4526 
4527  if (!tgt) {
4528  ql_dbg(ql_dbg_tgt, vha, 0xe069,
4529  "Unable to locate qla_tgt pointer from"
4530  " struct qla_hw_data\n");
4531  dump_stack();
4532  return;
4533  }
4534 
4535  spin_lock_irqsave(&ha->hardware_lock, flags);
4536  tgt->tgt_stopped = 0;
4537  qlt_set_mode(vha);
4538  spin_unlock_irqrestore(&ha->hardware_lock, flags);
4539 
4540  set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4541  qla2xxx_wake_dpc(vha);
4542  qla2x00_wait_for_hba_online(vha);
4543 }
4544 EXPORT_SYMBOL(qlt_enable_vha);
4545 
4546 /*
4547  * qlt_disable_vha - NO LOCK HELD
4548  *
4549  * Disable Target Mode and reset the adapter
4550  */
4551 void
4552 qlt_disable_vha(struct scsi_qla_host *vha)
4553 {
4554  struct qla_hw_data *ha = vha->hw;
4555  struct qla_tgt *tgt = ha->tgt.qla_tgt;
4556  unsigned long flags;
4557 
4558  if (!tgt) {
4559  ql_dbg(ql_dbg_tgt, vha, 0xe06a,
4560  "Unable to locate qla_tgt pointer from"
4561  " struct qla_hw_data\n");
4562  dump_stack();
4563  return;
4564  }
4565 
4566  spin_lock_irqsave(&ha->hardware_lock, flags);
4567  qlt_clear_mode(vha);
4568  spin_unlock_irqrestore(&ha->hardware_lock, flags);
4569 
4570  set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4571  qla2xxx_wake_dpc(vha);
4572  qla2x00_wait_for_hba_online(vha);
4573 }
4574 
4575 /*
4576  * Called from qla_init.c:qla24xx_vport_create() context to set up
4577  * the target mode specific struct scsi_qla_host and struct qla_hw_data
4578  * members.
4579  */
4580 void
4581 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
4582 {
4583  if (!qla_tgt_mode_enabled(vha))
4584  return;
4585 
4586  mutex_init(&ha->tgt.tgt_mutex);
4587  mutex_init(&ha->tgt.tgt_host_action_mutex);
4588 
4589  qlt_clear_mode(vha);
4590 
4591  /*
4592  * NOTE: Currently the value is kept the same for <24xx and
4593  * >=24xx ISPs. If it becomes necessary to change it,
4594  * a check for the specific ISP type should be added,
4595  * assigning the value appropriately.
4596  */
4597  ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
4598 }
4599 
4600 void
4601 qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
4602 {
4603  /*
4604  * FC-4 Feature bit 0 indicates target functionality to the name server.
4605  */
4606  if (qla_tgt_mode_enabled(vha)) {
4607  if (qla_ini_mode_enabled(vha))
4608  ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
4609  else
4610  ct_req->req.rff_id.fc4_feature = BIT_0;
4611  } else if (qla_ini_mode_enabled(vha)) {
4612  ct_req->req.rff_id.fc4_feature = BIT_1;
4613  }
4614 }
4615 
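/*
 * In the RFF_ID registration above, FC-4 Features BIT_0 advertises target
 * functionality and BIT_1 initiator functionality, so the reachable
 * encodings are:
 *
 *   target mode | initiator mode | fc4_feature
 *   ------------+----------------+---------------
 *       on      |      on        | BIT_0 | BIT_1
 *       on      |      off       | BIT_0
 *       off     |      on        | BIT_1
 */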
4616 /*
4617  * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
4618  * @vha: HA context
4619  *
4620  * The beginning of the ATIO ring has the initialization control block
4621  * already built by the NVRAM config routine.
4622  */
4625 void
4626 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
4627 {
4628  struct qla_hw_data *ha = vha->hw;
4629  uint16_t cnt;
4630  struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
4631 
4632  if (!qla_tgt_mode_enabled(vha))
4633  return;
4634 
4635  for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
4636  pkt->u.raw.signature = ATIO_PROCESSED;
4637  pkt++;
4638  }
4639 
4640 }
4641 
4642 /*
4643  * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
4644  * @ha: SCSI driver HA context
4645  */
4646 void
4647 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
4648 {
4649  struct qla_hw_data *ha = vha->hw;
4650  struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4651  struct atio_from_isp *pkt;
4652  int cnt, i;
4653 
4654  if (!vha->flags.online)
4655  return;
4656 
4657  while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
4658  pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
4659  cnt = pkt->u.raw.entry_count;
4660 
4661  qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
4662 
4663  for (i = 0; i < cnt; i++) {
4664  ha->tgt.atio_ring_index++;
4665  if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
4666  ha->tgt.atio_ring_index = 0;
4667  ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
4668  } else
4669  ha->tgt.atio_ring_ptr++;
4670 
4671  pkt->u.raw.signature = ATIO_PROCESSED;
4672  pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
4673  }
4674  wmb();
4675  }
4676 
4677  /* Adjust ring index */
4678  WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
4679 }
4680 
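/*
 * The per-entry index arithmetic in qlt_24xx_process_atio_queue() above is
 * a plain circular-buffer advance. Factored out as a sketch (helper name
 * hypothetical):
 */
#if 0
static inline void qlt_atio_ring_advance(struct qla_hw_data *ha)
{
	if (++ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
		ha->tgt.atio_ring_index = 0;
		ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;	/* wrap */
	} else
		ha->tgt.atio_ring_ptr++;
}
#endif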
4681 void
4682 qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
4683 {
4684  struct qla_hw_data *ha = vha->hw;
4685 
4686 /* FIXME: atio_q in/out for ha->mqenable=1..? */
4687  if (ha->mqenable) {
4688 #if 0
4689  WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
4690  WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
4691  RD_REG_DWORD(&reg->isp25mq.atio_q_out);
4692 #endif
4693  } else {
4694  /* Set up ATIO registers for target mode */
4695  WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
4696  WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
4697  RD_REG_DWORD(&reg->isp24.atio_q_out);
4698  }
4699 }
4700 
4701 void
4702 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
4703 {
4704  struct qla_hw_data *ha = vha->hw;
4705 
4706  if (qla_tgt_mode_enabled(vha)) {
4707  if (!ha->tgt.saved_set) {
4708  /* We save only once */
4709  ha->tgt.saved_exchange_count = nv->exchange_count;
4710  ha->tgt.saved_firmware_options_1 =
4711  nv->firmware_options_1;
4712  ha->tgt.saved_firmware_options_2 =
4713  nv->firmware_options_2;
4714  ha->tgt.saved_firmware_options_3 =
4715  nv->firmware_options_3;
4716  ha->tgt.saved_set = 1;
4717  }
4718 
4719  nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
4720 
4721  /* Enable target mode */
4722  nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
4723 
4724  /* Disable ini mode, if requested */
4725  if (!qla_ini_mode_enabled(vha))
4726  nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
4727 
4728  /* Disable Full Login after LIP */
4729  nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
4730  /* Enable initial LIP */
4731  nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
4732  /* Enable FC tape support */
4733  nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4734  /* Disable Full Login after LIP */
4735  nv->host_p &= __constant_cpu_to_le32(~BIT_10);
4736  /* Enable target PRLI control */
4737  nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
4738  } else {
4739  if (ha->tgt.saved_set) {
4740  nv->exchange_count = ha->tgt.saved_exchange_count;
4741  nv->firmware_options_1 =
4742  ha->tgt.saved_firmware_options_1;
4743  nv->firmware_options_2 =
4744  ha->tgt.saved_firmware_options_2;
4745  nv->firmware_options_3 =
4746  ha->tgt.saved_firmware_options_3;
4747  }
4748  return;
4749  }
4750 
4751  /* out-of-order frames reassembly */
4752  nv->firmware_options_3 |= BIT_6|BIT_9;
4753 
4754  if (ha->tgt.enable_class_2) {
4755  if (vha->flags.init_done)
4756  fc_host_supported_classes(vha->host) =
4757  FC_COS_CLASS2 | FC_COS_CLASS3;
4758 
4759  nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
4760  } else {
4761  if (vha->flags.init_done)
4762  fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
4763 
4764  nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
4765  }
4766 }
4767 
4768 void
4769 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
4770  struct init_cb_24xx *icb)
4771 {
4772  struct qla_hw_data *ha = vha->hw;
4773 
4774  if (ha->tgt.node_name_set) {
4775  memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
4776  icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
4777  }
4778 }
4779 
4780 int
4781 qlt_24xx_process_response_error(struct scsi_qla_host *vha,
4782  struct sts_entry_24xx *pkt)
4783 {
4784  switch (pkt->entry_type) {
4785  case ABTS_RECV_24XX:
4786  case ABTS_RESP_24XX:
4787  case CTIO_TYPE7:
4788  case NOTIFY_ACK_TYPE:
4789  return 1;
4790  default:
4791  return 0;
4792  }
4793 }
4794 
4795 void
4796 qlt_modify_vp_config(struct scsi_qla_host *vha,
4797  struct vp_config_entry_24xx *vpmod)
4798 {
4799  if (qla_tgt_mode_enabled(vha))
4800  vpmod->options_idx1 &= ~BIT_5;
4801  /* Disable ini mode, if requested */
4802  if (!qla_ini_mode_enabled(vha))
4803  vpmod->options_idx1 &= ~BIT_4;
4804 }
4805 
4806 void
4807 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
4808 {
4809  if (!QLA_TGT_MODE_ENABLED())
4810  return;
4811 
4812  mutex_init(&ha->tgt.tgt_mutex);
4813  mutex_init(&ha->tgt.tgt_host_action_mutex);
4814  qlt_clear_mode(base_vha);
4815 }
4816 
4817 int
4818 qlt_mem_alloc(struct qla_hw_data *ha)
4819 {
4820  if (!QLA_TGT_MODE_ENABLED())
4821  return 0;
4822 
4823  ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
4824  MAX_MULTI_ID_FABRIC, GFP_KERNEL);
4825  if (!ha->tgt.tgt_vp_map)
4826  return -ENOMEM;
4827 
4828  ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
4829  (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
4830  &ha->tgt.atio_dma, GFP_KERNEL);
4831  if (!ha->tgt.atio_ring) {
4832  kfree(ha->tgt.tgt_vp_map);
4833  return -ENOMEM;
4834  }
4835  return 0;
4836 }
4837 
4838 void
4839 qlt_mem_free(struct qla_hw_data *ha)
4840 {
4841  if (!QLA_TGT_MODE_ENABLED())
4842  return;
4843 
4844  if (ha->tgt.atio_ring) {
4845  dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
4846  sizeof(struct atio_from_isp), ha->tgt.atio_ring,
4847  ha->tgt.atio_dma);
4848  }
4849  kfree(ha->tgt.tgt_vp_map);
4850 }
4851 
4852 /* vport_slock to be held by the caller */
4853 void
4854 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
4855 {
4856  if (!QLA_TGT_MODE_ENABLED())
4857  return;
4858 
4859  switch (cmd) {
4860  case SET_VP_IDX:
4861  vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
4862  break;
4863  case SET_AL_PA:
4864  vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
4865  break;
4866  case RESET_VP_IDX:
4867  vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
4868  break;
4869  case RESET_AL_PA:
4870  vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
4871  break;
4872  }
4873 }
4874 
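/*
 * qlt_update_vp_map() maintains two reverse maps: vp_idx -> vha and
 * AL_PA -> vp_idx. The lookups they enable are one-liners (helper names
 * hypothetical; the real driver does this inline when routing ATIOs to
 * the right vport):
 */
#if 0
static struct scsi_qla_host *qlt_vha_by_vp_idx(struct qla_hw_data *ha,
	uint16_t vp_idx)
{
	return ha->tgt.tgt_vp_map[vp_idx].vha;
}

static uint16_t qlt_vp_idx_by_al_pa(struct qla_hw_data *ha, uint8_t al_pa)
{
	return ha->tgt.tgt_vp_map[al_pa].idx;
}
#endif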
4875 static int __init qlt_parse_ini_mode(void)
4876 {
4877  if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
4878  ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
4879  else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
4880  ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
4881  else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
4882  ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
4883  else
4884  return false;
4885 
4886  return true;
4887 }
4888 
4889 int __init qlt_init(void)
4890 {
4891  int ret;
4892 
4893  if (!qlt_parse_ini_mode()) {
4894  ql_log(ql_log_fatal, NULL, 0xe06b,
4895  "qlt_parse_ini_mode() failed\n");
4896  return -EINVAL;
4897  }
4898 
4899  if (!QLA_TGT_MODE_ENABLED())
4900  return 0;
4901 
4902  qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
4903  sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
4904  NULL);
4905  if (!qla_tgt_cmd_cachep) {
4906  ql_log(ql_log_fatal, NULL, 0xe06c,
4907  "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
4908  return -ENOMEM;
4909  }
4910 
4911  qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
4912  sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
4913  qla_tgt_mgmt_cmd), 0, NULL);
4914  if (!qla_tgt_mgmt_cmd_cachep) {
4915  ql_log(ql_log_fatal, NULL, 0xe06d,
4916  "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
4917  ret = -ENOMEM;
4918  goto out;
4919  }
4920 
4921  qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
4922  mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
4923  if (!qla_tgt_mgmt_cmd_mempool) {
4924  ql_log(ql_log_fatal, NULL, 0xe06e,
4925  "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
4926  ret = -ENOMEM;
4927  goto out_mgmt_cmd_cachep;
4928  }
4929 
4930  qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
4931  if (!qla_tgt_wq) {
4932  ql_log(ql_log_fatal, NULL, 0xe06f,
4933  "alloc_workqueue for qla_tgt_wq failed\n");
4934  ret = -ENOMEM;
4935  goto out_cmd_mempool;
4936  }
4937  /*
4938  * Return 1 to signal that initiator-mode is being disabled
4939  */
4940  return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
4941 
4942 out_cmd_mempool:
4943  mempool_destroy(qla_tgt_mgmt_cmd_mempool);
4944 out_mgmt_cmd_cachep:
4945  kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
4946 out:
4947  kmem_cache_destroy(qla_tgt_cmd_cachep);
4948  return ret;
4949 }
4950 
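/*
 * The unwind labels in qlt_init() release whatever was already set up, in
 * reverse allocation order: the mempool, then the mgmt-cmd cache, then the
 * cmd cache. The special return value 1 tells the caller that initiator
 * mode should stay off. A caller sketch (the real call site is qla2xxx's
 * module init; the flag name here is hypothetical):
 */
#if 0
static bool initiator_disabled;

static int caller_sketch(void)
{
	int ret = qlt_init();

	if (ret < 0)
		return ret;	/* hard failure, nothing left allocated */
	if (ret > 0)
		initiator_disabled = true;
	return 0;
}
#endif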
4951 void qlt_exit(void)
4952 {
4953  if (!QLA_TGT_MODE_ENABLED())
4954  return;
4955 
4956  destroy_workqueue(qla_tgt_wq);
4957  mempool_destroy(qla_tgt_mgmt_cmd_mempool);
4958  kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
4959  kmem_cache_destroy(qla_tgt_cmd_cachep);
4960 }