Linux Kernel 3.7.1
target_core_transport.c
/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_generic_get_mem(struct se_cmd *cmd);
static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
static void transport_put_cmd(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
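
/*
 * Illustrative note (not in the original source): callers pass one of the
 * scsi_index_t values and receive a unique, monotonically increasing index
 * for that MIB table, e.g. as done for new devices later in this file:
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 *
 * The spinlock above makes the increment safe against concurrent callers.
 */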

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

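/*
 * Usage sketch (illustrative only, not part of the original source): a
 * fabric driver typically allocates the session from its login path and
 * must handle the ERR_PTR() return:
 *
 *	struct se_session *se_sess = transport_init_session();
 *
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 */
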
/*
 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate the active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

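/*
 * Continuation of the sketch above (illustrative only): once the fabric
 * has resolved its struct se_node_acl, the new session is bound to the
 * portal group; "fabric_sess" stands in for the fabric module's private
 * session pointer:
 *
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess);
 */
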
static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

void target_put_session(struct se_session *se_sess)
{
	struct se_portal_group *tpg = se_sess->se_tpg;

	if (tpg->se_tpg_tfo->put_session != NULL) {
		tpg->se_tpg_tfo->put_session(se_sess);
		return;
	}
	kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
			struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate the active
	 * struct se_session.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool comp_nacl = true;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
			core_tpg_wait_for_nacl_pr_ref(se_nacl);
			core_free_device_list_for_node(se_nacl, se_tpg);
			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);

			comp_nacl = false;
			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		}
	}
	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If the last kref is dropping now for an explicit NodeACL, awake the
	 * sleeping ->acl_free_comp caller in the configfs
	 * se_node_acl->acl_group removal context.
	 */
	if (se_nacl && comp_nacl == true)
		target_put_nacl(se_nacl);

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

/*
 * Called with cmd->t_state_lock held.
 */
static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if an IOCTL context caller is requesting the stopping of
	 * this command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (remove_from_lists)
			target_remove_from_state_list(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}

	if (remove_from_lists) {
		target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the handoff to FE.
		 */
		cmd->se_lun = NULL;
	}

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}

	cmd->transport_state &= ~CMD_T_ACTIVE;
	if (remove_from_lists) {
		/*
		 * Some fabric modules like tcm_loop can release their
		 * internally allocated I/O reference and struct se_cmd now.
		 *
		 * Fabric modules are expected to return '1' here if the
		 * se_cmd being passed is released at this point,
		 * or zero if not being released.
		 */
		if (cmd->se_tfo->check_stop_free != NULL) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return cmd->se_tfo->check_stop_free(cmd);
		}
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, true);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (!list_empty(&cmd->se_lun_node))
		list_del_init(&cmd->se_lun_node);
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove)
		transport_put_cmd(cmd);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		dev->transport->transport_complete(cmd,
				cmd->t_data_sg,
				transport_get_sense_buffer(cmd));
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
	}

	/*
	 * See if we are waiting to complete for an exception condition.
	 */
	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Check for the case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->t_transport_stop_comp);
		return;
	} else if (cmd->transport_state & CMD_T_FAILED) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_SENT);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

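/*
 * Usage sketch (illustrative only): a backend calls target_complete_cmd()
 * from its I/O completion path to hand the command back to target-core,
 * which queues the good/failure completion work onto target_completion_wq:
 *
 *	target_complete_cmd(cmd, GOOD);
 */
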
static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
				qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size,
		dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

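/*
 * Illustrative note (not in the original source): taken together, the four
 * transport_set_vpd_*() helpers above decode one INQUIRY EVPD 0x83
 * identification descriptor, assuming "page_83" points at the start of a
 * descriptor:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 *
 * Each helper masks the relevant bits per spc3r23 section 7.6.3.1 and logs
 * a human readable dump via its transport_dump_vpd_*() counterpart.
 */
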
static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("  ANSI SCSI revision: %02x\n",
		dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	dev->dev_flags		= device_flags;
	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr		= transport_dev;
	dev->se_hba		= hba;
	dev->se_sub_dev		= se_dev;
	dev->transport		= transport;
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto err_dev_list;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		goto err_dev_list;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto err_wq;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;

err_wq:
	destroy_workqueue(dev->tmr_wq);
err_dev_list:
	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

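/*
 * Usage sketch (illustrative only; the IBLOCK names are examples from the
 * in-tree backend of this era): a subsystem plugin calls this from its
 * create_virtdevice() handler with its private device pointer plus INQUIRY
 * product/revision constants:
 *
 *	dev = transport_add_device_to_core_hba(hba, &iblock_template,
 *			se_dev, dev_flags, ib_dev, &dev_limits,
 *			"IBLOCK", IBLOCK_VERSION);
 *	if (!dev)
 *		goto failed;
 */
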
int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
				" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->se_sub_dev->se_dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length.  Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return 0;

out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}

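/*
 * Worked example (not in the original source): if the CDB-derived size is
 * 2048 bytes but the fabric advertised data_length = 4096, the underflow
 * branch above sets SCF_UNDERFLOW_BIT, residual_count = 2048 and shrinks
 * data_length to 2048. If the CDB instead asks for more than the fabric
 * buffer holds, the overflow branch keeps data_length and only records the
 * excess in residual_count.
 */
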
/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	init_completion(&cmd->task_stop_comp);
	spin_lock_init(&cmd->t_state_lock);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}

/* target_setup_cmd_from_cdb():
 *
 * Called from fabric RX Thread.
 */
int target_setup_cmd_from_cdb(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	u32 pr_reg_type = 0;
	u8 alua_ascq = 0;
	unsigned long flags;
	int ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}

	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state..
		 */
		if (ret > 0) {
			pr_debug("[%s]: ALUA TG Port not available, "
				"SenseKey: NOT_READY, ASC/ASCQ: "
				"0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);

			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}

	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EBUSY;
		}
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	ret = cmd->se_dev->transport->parse_cdb(cmd);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);

	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

/*
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 * 			 se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	if (rc)
		return rc;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return 0;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd);
		return 0;
	}
	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs.  Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd);
			return 0;
		}
	}
	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

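/*
 * Usage sketch (illustrative only, modeled on how the tcm_loop fabric maps
 * a struct scsi_cmnd "sc"; exact field names are that driver's business):
 * fabrics that already own SGL memory pass it in so target-core skips its
 * internal allocation:
 *
 *	rc = target_submit_cmd_map_sgls(se_cmd, se_sess, sc->cmnd,
 *			&tl_cmd->tl_sense_buf[0], sc->device->lun,
 *			scsi_bufflen(sc), tcm_loop_sam_attr(sc),
 *			sc->sc_data_direction, 0,
 *			scsi_sglist(sc), scsi_sg_count(sc), NULL, 0);
 *	if (rc < 0) {
 *		set_host_byte(sc, DID_NO_CONNECT);
 *		sc->scsi_done(sc);
 *	}
 */
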
/*
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);

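/*
 * Usage sketch (illustrative only): the common case for fabrics that let
 * target-core allocate the data buffer; a non zero return means the I/O
 * was rejected due to active session shutdown:
 *
 *	rc = target_submit_cmd(se_cmd, se_sess, cdb, sense, unpacked_lun,
 *			data_length, MSG_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			TARGET_SCF_ACK_KREF);
 *	if (rc)
 *		return rc;
 */
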
static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u32 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, unsigned int tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);

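/*
 * Usage sketch (illustrative only): aborting a previously submitted command
 * by its task tag from a fabric's TMR path:
 *
 *	rc = target_submit_tmr(&tmr->se_cmd, se_sess, NULL, unpacked_lun,
 *			fabric_tmr_ptr, TMR_ABORT_TASK, GFP_KERNEL,
 *			ref_task_tag, TARGET_SCF_ACK_KREF);
 *	if (rc < 0)
 *		return rc;
 */
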
/*
 * If the cmd is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
{
	bool was_active = false;

	if (cmd->transport_state & CMD_T_BUSY) {
		cmd->transport_state |= CMD_T_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("cmd %p waiting to complete\n", cmd);
		wait_for_completion(&cmd->task_stop_comp);
		pr_debug("cmd %p stopped successfully\n", cmd);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
		cmd->transport_state &= ~CMD_T_BUSY;
		was_active = true;
	}

	return was_active;
}

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->scsi_sense_reason);
	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	switch (cmd->scsi_sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_ADDRESS_OUT_OF_RANGE:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	ret = transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	if (!transport_cmd_check_stop_to_fabric(cmd))
		;
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

static void __target_execute_cmd(struct se_cmd *cmd)
{
	int error = 0;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
	spin_unlock_irq(&cmd->t_state_lock);

	if (cmd->execute_cmd)
		error = cmd->execute_cmd(cmd);

	if (error) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
		spin_unlock_irq(&cmd->t_state_lock);

		transport_generic_request_failure(cmd);
	}
}

void target_execute_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * If the received CDB has already been aborted stop processing it here.
	 */
	if (transport_check_aborted_status(cmd, 1)) {
		complete(&cmd->t_transport_stop_comp);
		return;
	}

	/*
	 * Determine if an IOCTL context caller is requesting the stopping of
	 * this command for LUN shutdown purposes.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);
		complete(&cmd->transport_lun_stop_comp);
		return;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irq(&cmd->t_state_lock);
		complete(&cmd->t_transport_stop_comp);
		return;
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	spin_unlock_irq(&cmd->t_state_lock);

	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		goto execute;

	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to allow the passed struct se_cmd list of tasks to the front of the list.
	 */
	switch (cmd->sam_task_attr) {
	case MSG_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
			 "se_ordered_id: %u\n",
			 cmd->t_task_cdb[0], cmd->se_ordered_id);
		goto execute;
	case MSG_ORDERED_TAG:
		atomic_inc(&dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
			 " se_ordered_id: %u\n",
			 cmd->t_task_cdb[0], cmd->se_ordered_id);

		/*
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
		 */
		if (!atomic_read(&dev->simple_cmds))
			goto execute;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&dev->simple_cmds);
		smp_mb__after_atomic_inc();
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) != 0) {
		spin_lock(&dev->delayed_cmd_lock);
		list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
		spin_unlock(&dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		return;
	}

execute:
	/*
	 * Otherwise, no ORDERED task attributes exist..
	 */
	__target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd);

		if (cmd->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}

	target_restart_delayed_cmds(dev);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->t_bidi_data_sg) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

2027 static void target_complete_ok_work(struct work_struct *work)
2028 {
2029  struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2030  int ret;
2031 
2032  /*
2033  * Check if we need to move delayed/dormant tasks from cmds on the
2034  * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2035  * Attribute.
2036  */
2037  if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2038  transport_complete_task_attr(cmd);
2039  /*
2040  * Check to schedule QUEUE_FULL work, or execute an existing
2041  * cmd->transport_qf_callback()
2042  */
2043  if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2044  schedule_work(&cmd->se_dev->qf_work_queue);
2045 
2046  /*
2047  * Check if we need to send a sense buffer from
2048  * the struct se_cmd in question.
2049  */
2050  if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2051  WARN_ON(!cmd->scsi_status);
2052  ret = transport_send_check_condition_and_sense(
2053  cmd, 0, 1);
2054  if (ret == -EAGAIN || ret == -ENOMEM)
2055  goto queue_full;
2056 
2057  transport_lun_remove_cmd(cmd);
2058  transport_cmd_check_stop_to_fabric(cmd);
2059  return;
2060  }
2061  /*
2062  * Check for a callback, used by, amongst other things,
2063  * XDWRITE_READ_10 emulation.
2064  */
2065  if (cmd->transport_complete_callback)
2066  cmd->transport_complete_callback(cmd);
2067 
2068  switch (cmd->data_direction) {
2069  case DMA_FROM_DEVICE:
2070  spin_lock(&cmd->se_lun->lun_sep_lock);
2071  if (cmd->se_lun->lun_sep) {
2072  cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
2073  cmd->data_length;
2074  }
2075  spin_unlock(&cmd->se_lun->lun_sep_lock);
2076 
2077  ret = cmd->se_tfo->queue_data_in(cmd);
2078  if (ret == -EAGAIN || ret == -ENOMEM)
2079  goto queue_full;
2080  break;
2081  case DMA_TO_DEVICE:
2082  spin_lock(&cmd->se_lun->lun_sep_lock);
2083  if (cmd->se_lun->lun_sep) {
2084  cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
2085  cmd->data_length;
2086  }
2087  spin_unlock(&cmd->se_lun->lun_sep_lock);
2088  /*
2089  * Check if we need to send READ payload for BIDI-COMMAND
2090  */
2091  if (cmd->t_bidi_data_sg) {
2092  spin_lock(&cmd->se_lun->lun_sep_lock);
2093  if (cmd->se_lun->lun_sep) {
2094  cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
2095  cmd->data_length;
2096  }
2097  spin_unlock(&cmd->se_lun->lun_sep_lock);
2098  ret = cmd->se_tfo->queue_data_in(cmd);
2099  if (ret == -EAGAIN || ret == -ENOMEM)
2100  goto queue_full;
2101  break;
2102  }
2103  /* Fall through for DMA_TO_DEVICE */
2104  case DMA_NONE:
2105  ret = cmd->se_tfo->queue_status(cmd);
2106  if (ret == -EAGAIN || ret == -ENOMEM)
2107  goto queue_full;
2108  break;
2109  default:
2110  break;
2111  }
2112 
2113  transport_lun_remove_cmd(cmd);
2114  transport_cmd_check_stop_to_fabric(cmd);
2115  return;
2116 
2117 queue_full:
2118  pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2119  " data_direction: %d\n", cmd, cmd->data_direction);
2120  cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
2121  transport_handle_queue_full(cmd, cmd->se_dev);
2122 }
2123 
2124 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
2125 {
2126  struct scatterlist *sg;
2127  int count;
2128 
2129  for_each_sg(sgl, sg, nents, count)
2130  __free_page(sg_page(sg));
2131 
2132  kfree(sgl);
2133 }
2134 
2135 static inline void transport_free_pages(struct se_cmd *cmd)
2136 {
2137  if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
2138  return;
2139 
2140  transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2141  cmd->t_data_sg = NULL;
2142  cmd->t_data_nents = 0;
2143 
2144  transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2145  cmd->t_bidi_data_sg = NULL;
2146  cmd->t_bidi_data_nents = 0;
2147 }
2148 
2149 /**
2150  * transport_release_cmd - free a command
2151  * @cmd:       command to free
2152  *
2153  * This routine unconditionally frees a command, and reference counting
2154  * or list removal must be done in the caller.
2155  */
2156 static void transport_release_cmd(struct se_cmd *cmd)
2157 {
2158  BUG_ON(!cmd->se_tfo);
2159 
2160  if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2161  core_tmr_release_req(cmd->se_tmr_req);
2162  if (cmd->t_task_cdb != cmd->__t_task_cdb)
2163  kfree(cmd->t_task_cdb);
2164  /*
2165  * If this cmd has been setup with target_get_sess_cmd(), drop
2166  * the kref and call ->release_cmd() in kref callback.
2167  */
2168  if (cmd->check_release != 0) {
2169  target_put_sess_cmd(cmd->se_sess, cmd);
2170  return;
2171  }
2172  cmd->se_tfo->release_cmd(cmd);
2173 }
2174 
2175 /**
2176  * transport_put_cmd - release a reference to a command
2177  * @cmd:       command to release
2178  *
2179  * This routine releases our reference to the command and frees it if possible.
2180  */
2181 static void transport_put_cmd(struct se_cmd *cmd)
2182 {
2183  unsigned long flags;
2184 
2185  spin_lock_irqsave(&cmd->t_state_lock, flags);
2186  if (atomic_read(&cmd->t_fe_count)) {
2187  if (!atomic_dec_and_test(&cmd->t_fe_count))
2188  goto out_busy;
2189  }
2190 
2191  if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
2192  cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
2193  target_remove_from_state_list(cmd);
2194  }
2195  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2196 
2197  transport_free_pages(cmd);
2198  transport_release_cmd(cmd);
2199  return;
2200 out_busy:
2201  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2202 }
2203 
2204 /*
2205  * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
2206  * allocating in the core.
2207  * @cmd: Associated se_cmd descriptor
2208  * @mem: SGL style memory for TCM WRITE / READ
2209  * @sg_mem_num: Number of SGL elements
2210  * @mem_bidi_in: SGL style memory for TCM BIDI READ
2211  * @sg_mem_bidi_num: Number of BIDI READ SGL elements
2212  *
2213  * Return: nonzero if cmd was rejected for -ENOMEM or improper usage
2214  * of parameters.
2215  */
2216 int transport_generic_map_mem_to_cmd(
2217  struct se_cmd *cmd,
2218  struct scatterlist *sgl,
2219  u32 sgl_count,
2220  struct scatterlist *sgl_bidi,
2221  u32 sgl_bidi_count)
2222 {
2223  if (!sgl || !sgl_count)
2224  return 0;
2225 
2226  /*
2227  * Reject SCSI data overflow with map_mem_to_cmd() as incoming
2228  * scatterlists already have been set to follow what the fabric
2229  * passes for the original expected data transfer length.
2230  */
2231  if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2232  pr_warn("Rejecting SCSI DATA overflow for fabric using"
2233  " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
2234  cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2235  cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2236  return -EINVAL;
2237  }
2238 
2239  cmd->t_data_sg = sgl;
2240  cmd->t_data_nents = sgl_count;
2241 
2242  if (sgl_bidi && sgl_bidi_count) {
2243  cmd->t_bidi_data_sg = sgl_bidi;
2244  cmd->t_bidi_data_nents = sgl_bidi_count;
2245  }
2246  cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
2247  return 0;
2248 }
2249 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
2250 
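/*
 * Editor's sketch (hypothetical fabric code): a fabric that already owns
 * DMA-able pages hands its scatterlist to the core with the call above,
 * so transport_generic_get_mem() is skipped and transport_free_pages()
 * leaves the memory alone.  struct my_fabric_cmd is invented.
 */
static int my_fabric_attach_sgl(struct my_fabric_cmd *fc)
{
	/* no BIDI scatterlist in this example */
	return transport_generic_map_mem_to_cmd(&fc->se_cmd, fc->sgl,
						fc->sgl_nents, NULL, 0);
}
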
2251 void *transport_kmap_data_sg(struct se_cmd *cmd)
2252 {
2253  struct scatterlist *sg = cmd->t_data_sg;
2254  struct page **pages;
2255  int i;
2256 
2257  /*
2258  * We need to take into account a possible offset here for fabrics like
2259  * tcm_loop who may be using a contig buffer from the SCSI midlayer for
2260  * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2261  */
2262  if (!cmd->t_data_nents)
2263  return NULL;
2264 
2265  BUG_ON(!sg);
2266  if (cmd->t_data_nents == 1)
2267  return kmap(sg_page(sg)) + sg->offset;
2268 
2269  /* >1 page. use vmap */
2270  pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
2271  if (!pages) {
2272  cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2273  return NULL;
2274  }
2275 
2276  /* convert sg[] to pages[] */
2277  for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2278  pages[i] = sg_page(sg);
2279  }
2280 
2281  cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
2282  kfree(pages);
2283  if (!cmd->t_data_vmap) {
2284  cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2285  return NULL;
2286  }
2287 
2288  return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2289 }
2290 EXPORT_SYMBOL(transport_kmap_data_sg);
2291 
2292 void transport_kunmap_data_sg(struct se_cmd *cmd)
2293 {
2294  if (!cmd->t_data_nents) {
2295  return;
2296  } else if (cmd->t_data_nents == 1) {
2297  kunmap(sg_page(cmd->t_data_sg));
2298  return;
2299  }
2300 
2301  vunmap(cmd->t_data_vmap);
2302  cmd->t_data_vmap = NULL;
2303 }
2304 EXPORT_SYMBOL(transport_kunmap_data_sg);
2305 
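/*
 * Editor's sketch (hypothetical backend code): the usual pattern for a
 * backend that needs a linear view of a small payload, e.g. while
 * emulating an INQUIRY or MODE SENSE response.
 */
static void my_backend_fill_buf(struct se_cmd *se_cmd,
				const void *src, unsigned int len)
{
	void *buf = transport_kmap_data_sg(se_cmd);

	if (!buf)	/* zero-length payload or mapping failure */
		return;
	memcpy(buf, src, min_t(unsigned int, len, se_cmd->data_length));
	transport_kunmap_data_sg(se_cmd);
}
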
2306 static int
2307 transport_generic_get_mem(struct se_cmd *cmd)
2308 {
2309  u32 length = cmd->data_length;
2310  unsigned int nents;
2311  struct page *page;
2312  gfp_t zero_flag;
2313  int i = 0;
2314 
2315  nents = DIV_ROUND_UP(length, PAGE_SIZE);
2316  cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
2317  if (!cmd->t_data_sg)
2318  return -ENOMEM;
2319 
2320  cmd->t_data_nents = nents;
2321  sg_init_table(cmd->t_data_sg, nents);
2322 
2323  zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;
2324 
2325  while (length) {
2326  u32 page_len = min_t(u32, length, PAGE_SIZE);
2327  page = alloc_page(GFP_KERNEL | zero_flag);
2328  if (!page)
2329  goto out;
2330 
2331  sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
2332  length -= page_len;
2333  i++;
2334  }
2335  return 0;
2336 
2337 out:
2338  while (i > 0) {
2339  i--;
2340  __free_page(sg_page(&cmd->t_data_sg[i]));
2341  }
2342  kfree(cmd->t_data_sg);
2343  cmd->t_data_sg = NULL;
2344  return -ENOMEM;
2345 }
2346 
2347 /*
2348  * Allocate any required resources to execute the command. For writes we
2349  * might not have the payload yet, so notify the fabric via a call to
2350  * ->write_pending instead. Otherwise place it on the execution queue.
2351  */
2352 int transport_generic_new_cmd(struct se_cmd *cmd)
2353 {
2354  int ret = 0;
2355 
2356  /*
2357  * Determine if the TCM fabric module has already allocated physical
2358  * memory, and is directly calling transport_generic_map_mem_to_cmd()
2359  * beforehand.
2360  */
2361  if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2362  cmd->data_length) {
2363  ret = transport_generic_get_mem(cmd);
2364  if (ret < 0)
2365  goto out_fail;
2366  }
2367 
2368  atomic_inc(&cmd->t_fe_count);
2369 
2370  /*
2371  * If this command is not a write we can execute it right here,
2372  * for write buffers we need to notify the fabric driver first
2373  * and let it call back once the write buffers are ready.
2374  */
2375  target_add_to_state_list(cmd);
2376  if (cmd->data_direction != DMA_TO_DEVICE) {
2377  target_execute_cmd(cmd);
2378  return 0;
2379  }
2380 
2381  spin_lock_irq(&cmd->t_state_lock);
2382  cmd->t_state = TRANSPORT_WRITE_PENDING;
2383  spin_unlock_irq(&cmd->t_state_lock);
2384 
2385  transport_cmd_check_stop(cmd, false);
2386 
2387  ret = cmd->se_tfo->write_pending(cmd);
2388  if (ret == -EAGAIN || ret == -ENOMEM)
2389  goto queue_full;
2390 
2391  if (ret < 0)
2392  return ret;
2393  return 1;
2394 
2395 out_fail:
2396  cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2397  cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2398  return -EINVAL;
2399 queue_full:
2400  pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2401  cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
2402  transport_handle_queue_full(cmd, cmd->se_dev);
2403  return 0;
2404 }
2405 EXPORT_SYMBOL(transport_generic_new_cmd);
2406 
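/*
 * Editor's sketch (hypothetical fabric code): the fabric half of the
 * WRITE handshake started by transport_generic_new_cmd() above.  The
 * ->write_pending() callback solicits the data-out phase; once the
 * payload has arrived, the fabric kicks execution with
 * target_execute_cmd().  my_fabric_send_transfer_ready() is invented.
 */
static int my_fabric_write_pending(struct se_cmd *se_cmd)
{
	/* returning -EAGAIN/-ENOMEM here makes the core retry through
	 * transport_write_pending_qf() below */
	return my_fabric_send_transfer_ready(se_cmd);
}

/* invoked from the fabric's receive path once all data has landed */
static void my_fabric_data_out_complete(struct se_cmd *se_cmd)
{
	target_execute_cmd(se_cmd);
}
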
2407 static void transport_write_pending_qf(struct se_cmd *cmd)
2408 {
2409  int ret;
2410 
2411  ret = cmd->se_tfo->write_pending(cmd);
2412  if (ret == -EAGAIN || ret == -ENOMEM) {
2413  pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2414  cmd);
2415  transport_handle_queue_full(cmd, cmd->se_dev);
2416  }
2417 }
2418 
2419 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2420 {
2421  if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2422  if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2423  transport_wait_for_tasks(cmd);
2424 
2425  transport_release_cmd(cmd);
2426  } else {
2427  if (wait_for_tasks)
2428  transport_wait_for_tasks(cmd);
2429 
2430  core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
2431 
2432  if (cmd->se_lun)
2433  transport_lun_remove_cmd(cmd);
2434 
2435  transport_put_cmd(cmd);
2436  }
2437 }
2438 EXPORT_SYMBOL(transport_generic_free_cmd);
2439 
2440 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
2441  * @se_sess: session to reference
2442  * @se_cmd: command descriptor to add
2443  * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
2444  */
2445 static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
2446  bool ack_kref)
2447 {
2448  unsigned long flags;
2449  int ret = 0;
2450 
2451  kref_init(&se_cmd->cmd_kref);
2452  /*
2453  * Add a second kref if the fabric caller is expecting to handle
2454  * fabric acknowledgement that requires two target_put_sess_cmd()
2455  * invocations before se_cmd descriptor release.
2456  */
2457  if (ack_kref == true) {
2458  kref_get(&se_cmd->cmd_kref);
2459  se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2460  }
2461 
2462  spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2463  if (se_sess->sess_tearing_down) {
2464  ret = -ESHUTDOWN;
2465  goto out;
2466  }
2467  list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2468  se_cmd->check_release = 1;
2469 
2470 out:
2471  spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2472  return ret;
2473 }
2474 
2475 static void target_release_cmd_kref(struct kref *kref)
2476 {
2477  struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2478  struct se_session *se_sess = se_cmd->se_sess;
2479  unsigned long flags;
2480 
2481  spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2482  if (list_empty(&se_cmd->se_cmd_list)) {
2483  spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2484  se_cmd->se_tfo->release_cmd(se_cmd);
2485  return;
2486  }
2487  if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
2488  spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2489  complete(&se_cmd->cmd_wait_comp);
2490  return;
2491  }
2492  list_del(&se_cmd->se_cmd_list);
2493  spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2494 
2495  se_cmd->se_tfo->release_cmd(se_cmd);
2496 }
2497 
2498 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
2499  * @se_sess: session to reference
2500  * @se_cmd: command descriptor to drop
2501  */
2502 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
2503 {
2504  return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
2505 }
2506 EXPORT_SYMBOL(target_put_sess_cmd);
2507 
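/*
 * Editor's sketch (hypothetical fabric code): the double-put pattern that
 * target_get_sess_cmd() sets up when ack_kref is true.  The command holds
 * two references, so the fabric puts once when its response has been sent
 * and once more when the transport-level acknowledgement arrives; the
 * final put ends in target_release_cmd_kref() and ->release_cmd().
 */
static void my_fabric_response_sent(struct se_cmd *se_cmd)
{
	/* drops the reference taken by kref_init() at submission */
	target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

static void my_fabric_ack_received(struct se_cmd *se_cmd)
{
	/* drops the extra SCF_ACK_KREF reference */
	target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}
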
2508 /* target_sess_cmd_list_set_waiting - Flag all commands in
2509  * sess_cmd_list to complete cmd_wait_comp. Set
2510  * sess_tearing_down so no more commands are queued.
2511  * @se_sess: session to flag
2512  */
2513 void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2514 {
2515  struct se_cmd *se_cmd;
2516  unsigned long flags;
2517 
2518  spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2519 
2520  WARN_ON(se_sess->sess_tearing_down);
2521  se_sess->sess_tearing_down = 1;
2522 
2523  list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list)
2524  se_cmd->cmd_wait_set = 1;
2525 
2526  spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2527 }
2528 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2529 
2530 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
2531  * @se_sess: session to wait for active I/O
2532  * @wait_for_tasks: Make extra transport_wait_for_tasks call
2533  */
2534 void target_wait_for_sess_cmds(
2535  struct se_session *se_sess,
2536  int wait_for_tasks)
2537 {
2538  struct se_cmd *se_cmd, *tmp_cmd;
2539  bool rc = false;
2540 
2541  list_for_each_entry_safe(se_cmd, tmp_cmd,
2542  &se_sess->sess_cmd_list, se_cmd_list) {
2543  list_del(&se_cmd->se_cmd_list);
2544 
2545  pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
2546  " %d\n", se_cmd, se_cmd->t_state,
2547  se_cmd->se_tfo->get_cmd_state(se_cmd));
2548 
2549  if (wait_for_tasks) {
2550  pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
2551  " fabric state: %d\n", se_cmd, se_cmd->t_state,
2552  se_cmd->se_tfo->get_cmd_state(se_cmd));
2553 
2554  rc = transport_wait_for_tasks(se_cmd);
2555 
2556  pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
2557  " fabric state: %d\n", se_cmd, se_cmd->t_state,
2558  se_cmd->se_tfo->get_cmd_state(se_cmd));
2559  }
2560 
2561  if (!rc) {
2562  wait_for_completion(&se_cmd->cmd_wait_comp);
2563  pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
2564  " fabric state: %d\n", se_cmd, se_cmd->t_state,
2565  se_cmd->se_tfo->get_cmd_state(se_cmd));
2566  }
2567 
2568  se_cmd->se_tfo->release_cmd(se_cmd);
2569  }
2570 }
2571 EXPORT_SYMBOL(target_wait_for_sess_cmds);
2572 
2573 /* transport_lun_wait_for_tasks():
2574  *
2575  * Called from ConfigFS context to stop the passed struct se_cmd to allow
2576  * a struct se_lun to be successfully shut down.
2577  */
2578 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
2579 {
2580  unsigned long flags;
2581  int ret = 0;
2582 
2583  /*
2584  * If the frontend has already requested this struct se_cmd to
2585  * be stopped, we can safely ignore this struct se_cmd.
2586  */
2587  spin_lock_irqsave(&cmd->t_state_lock, flags);
2588  if (cmd->transport_state & CMD_T_STOP) {
2589  cmd->transport_state &= ~CMD_T_LUN_STOP;
2590 
2591  pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
2592  cmd->se_tfo->get_task_tag(cmd));
2593  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2594  transport_cmd_check_stop(cmd, false);
2595  return -EPERM;
2596  }
2597  cmd->transport_state |= CMD_T_LUN_FE_STOP;
2598  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2599 
2600  // XXX: audit task_flags checks.
2601  spin_lock_irqsave(&cmd->t_state_lock, flags);
2602  if ((cmd->transport_state & CMD_T_BUSY) &&
2603  (cmd->transport_state & CMD_T_SENT)) {
2604  if (!target_stop_cmd(cmd, &flags))
2605  ret++;
2606  }
2607  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2608 
2609  pr_debug("ConfigFS: cmd: %p stop tasks ret:"
2610  " %d\n", cmd, ret);
2611  if (!ret) {
2612  pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
2613  cmd->se_tfo->get_task_tag(cmd));
2614  wait_for_completion(&cmd->transport_lun_stop_comp);
2615  pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
2616  cmd->se_tfo->get_task_tag(cmd));
2617  }
2618 
2619  return 0;
2620 }
2621 
2622 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
2623 {
2624  struct se_cmd *cmd = NULL;
2625  unsigned long lun_flags, cmd_flags;
2626  /*
2627  * Do exception processing and return CHECK_CONDITION status to the
2628  * Initiator Port.
2629  */
2630  spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2631  while (!list_empty(&lun->lun_cmd_list)) {
2632  cmd = list_first_entry(&lun->lun_cmd_list,
2633  struct se_cmd, se_lun_node);
2634  list_del_init(&cmd->se_lun_node);
2635 
2636  spin_lock(&cmd->t_state_lock);
2637  pr_debug("SE_LUN[%d] - Setting cmd->transport"
2638  "_lun_stop for ITT: 0x%08x\n",
2639  cmd->se_lun->unpacked_lun,
2640  cmd->se_tfo->get_task_tag(cmd));
2641  cmd->transport_state |= CMD_T_LUN_STOP;
2642  spin_unlock(&cmd->t_state_lock);
2643 
2644  spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
2645 
2646  if (!cmd->se_lun) {
2647  pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
2648  cmd->se_tfo->get_task_tag(cmd),
2649  cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2650  BUG();
2651  }
2652  /*
2653  * If the Storage engine still owns the iscsi_cmd_t, determine
2654  * and/or stop its context.
2655  */
2656  pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
2657  "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
2658  cmd->se_tfo->get_task_tag(cmd));
2659 
2660  if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
2661  spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2662  continue;
2663  }
2664 
2665  pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
2666  "_wait_for_tasks(): SUCCESS\n",
2667  cmd->se_lun->unpacked_lun,
2668  cmd->se_tfo->get_task_tag(cmd));
2669 
2670  spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
2671  if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
2672  spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
2673  goto check_cond;
2674  }
2675  cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
2676  target_remove_from_state_list(cmd);
2677  spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
2678 
2679  /*
2680  * The Storage engine stopped this struct se_cmd before it was
2681  * sent to the fabric frontend for delivery back to the
2682  * Initiator Node. Return this SCSI CDB back with a
2683  * CHECK_CONDITION status.
2684  */
2685 check_cond:
2686  transport_send_check_condition_and_sense(cmd,
2687  TCM_NON_EXISTENT_LUN, 0);
2688  /*
2689  * If the fabric frontend is waiting for this iscsi_cmd_t to
2690  * be released, notify the waiting thread now that LU has
2691  * finished accessing it.
2692  */
2693  spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
2694  if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
2695  pr_debug("SE_LUN[%d] - Detected FE stop for"
2696  " struct se_cmd: %p ITT: 0x%08x\n",
2697  lun->unpacked_lun,
2698  cmd, cmd->se_tfo->get_task_tag(cmd));
2699 
2700  spin_unlock_irqrestore(&cmd->t_state_lock,
2701  cmd_flags);
2702  transport_cmd_check_stop(cmd, false);
2703  complete(&cmd->transport_lun_fe_stop_comp);
2704  spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2705  continue;
2706  }
2707  pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
2708  lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
2709 
2710  spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
2711  spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2712  }
2713  spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
2714 }
2715 
2716 static int transport_clear_lun_thread(void *p)
2717 {
2718  struct se_lun *lun = p;
2719 
2720  __transport_clear_lun_from_sessions(lun);
2721  complete(&lun->lun_shutdown_comp);
2722 
2723  return 0;
2724 }
2725 
2726 int transport_clear_lun_from_sessions(struct se_lun *lun)
2727 {
2728  struct task_struct *kt;
2729 
2730  kt = kthread_run(transport_clear_lun_thread, lun,
2731  "tcm_cl_%u", lun->unpacked_lun);
2732  if (IS_ERR(kt)) {
2733  pr_err("Unable to start clear_lun thread\n");
2734  return PTR_ERR(kt);
2735  }
2736  wait_for_completion(&lun->lun_shutdown_comp);
2737 
2738  return 0;
2739 }
2740 
2741 /**
2742  * transport_wait_for_tasks - wait for completion to occur
2743  * @cmd:	command to wait
2744  *
2745  * Called from frontend fabric context to wait for storage engine
2746  * to pause and/or release frontend generated struct se_cmd.
2747  */
2748 bool transport_wait_for_tasks(struct se_cmd *cmd)
2749 {
2750  unsigned long flags;
2751 
2752  spin_lock_irqsave(&cmd->t_state_lock, flags);
2753  if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
2754  !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
2755  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2756  return false;
2757  }
2758 
2759  if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
2760  !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
2761  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2762  return false;
2763  }
2764  /*
2765  * If we are already stopped due to an external event (i.e. LUN shutdown)
2766  * sleep until the connection can have the passed struct se_cmd back.
2767  * The cmd->transport_lun_stopped_sem will be upped by
2768  * transport_clear_lun_from_sessions() once the ConfigFS context caller
2769  * has completed its operation on the struct se_cmd.
2770  */
2771  if (cmd->transport_state & CMD_T_LUN_STOP) {
2772  pr_debug("wait_for_tasks: Stopping"
2773  " wait_for_completion(&cmd->t_tasktransport_lun_fe"
2774  "_stop_comp); for ITT: 0x%08x\n",
2775  cmd->se_tfo->get_task_tag(cmd));
2776  /*
2777  * There is a special case for WRITES where a FE exception +
2778  * LUN shutdown means ConfigFS context is still sleeping on
2779  * transport_lun_stop_comp in transport_lun_wait_for_tasks().
2780  * We go ahead and up transport_lun_stop_comp just to be sure
2781  * here.
2782  */
2783  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2784  complete(&cmd->transport_lun_stop_comp);
2785  wait_for_completion(&cmd->transport_lun_fe_stop_comp);
2786  spin_lock_irqsave(&cmd->t_state_lock, flags);
2787 
2788  target_remove_from_state_list(cmd);
2789  /*
2790  * At this point, the frontend who was the originator of this
2791  * struct se_cmd, now owns the structure and can be released through
2792  * normal means below.
2793  */
2794  pr_debug("wait_for_tasks: Stopped"
2795  " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
2796  "stop_comp); for ITT: 0x%08x\n",
2797  cmd->se_tfo->get_task_tag(cmd));
2798 
2799  cmd->transport_state &= ~CMD_T_LUN_STOP;
2800  }
2801 
2802  if (!(cmd->transport_state & CMD_T_ACTIVE)) {
2803  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2804  return false;
2805  }
2806 
2807  cmd->transport_state |= CMD_T_STOP;
2808 
2809  pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
2810  " i_state: %d, t_state: %d, CMD_T_STOP\n",
2811  cmd, cmd->se_tfo->get_task_tag(cmd),
2812  cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2813 
2814  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2815 
2816  wait_for_completion(&cmd->t_transport_stop_comp);
2817 
2818  spin_lock_irqsave(&cmd->t_state_lock, flags);
2819  cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
2820 
2821  pr_debug("wait_for_tasks: Stopped wait_for_completion("
2822  "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
2823  cmd->se_tfo->get_task_tag(cmd));
2824 
2825  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2826 
2827  return true;
2828 }
2829 EXPORT_SYMBOL(transport_wait_for_tasks);
2830 
2831 static int transport_get_sense_codes(
2832  struct se_cmd *cmd,
2833  u8 *asc,
2834  u8 *ascq)
2835 {
2836  *asc = cmd->scsi_asc;
2837  *ascq = cmd->scsi_ascq;
2838 
2839  return 0;
2840 }
2841 
2842 static int transport_set_sense_codes(
2843  struct se_cmd *cmd,
2844  u8 asc,
2845  u8 ascq)
2846 {
2847  cmd->scsi_asc = asc;
2848  cmd->scsi_ascq = ascq;
2849 
2850  return 0;
2851 }
2852 
2853 int transport_send_check_condition_and_sense(
2854  struct se_cmd *cmd,
2855  u8 reason,
2856  int from_transport)
2857 {
2858  unsigned char *buffer = cmd->sense_buffer;
2859  unsigned long flags;
2860  u8 asc = 0, ascq = 0;
2861 
2862  spin_lock_irqsave(&cmd->t_state_lock, flags);
2863  if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2864  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2865  return 0;
2866  }
2867  cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
2868  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2869 
2870  if (!reason && from_transport)
2871  goto after_reason;
2872 
2873  if (!from_transport)
2874  cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
2875 
2876  /*
2877  * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
2878  * SENSE KEY values from include/scsi/scsi.h
2879  */
2880  switch (reason) {
2881  case TCM_NON_EXISTENT_LUN:
2882  /* CURRENT ERROR */
2883  buffer[0] = 0x70;
2884  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2885  /* ILLEGAL REQUEST */
2886  buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2887  /* LOGICAL UNIT NOT SUPPORTED */
2888  buffer[SPC_ASC_KEY_OFFSET] = 0x25;
2889  break;
2890  case TCM_UNSUPPORTED_SCSI_OPCODE:
2891  case TCM_SECTOR_COUNT_TOO_MANY:
2892  /* CURRENT ERROR */
2893  buffer[0] = 0x70;
2894  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2895  /* ILLEGAL REQUEST */
2896  buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2897  /* INVALID COMMAND OPERATION CODE */
2898  buffer[SPC_ASC_KEY_OFFSET] = 0x20;
2899  break;
2900  case TCM_UNKNOWN_MODE_PAGE:
2901  /* CURRENT ERROR */
2902  buffer[0] = 0x70;
2903  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2904  /* ILLEGAL REQUEST */
2905  buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2906  /* INVALID FIELD IN CDB */
2907  buffer[SPC_ASC_KEY_OFFSET] = 0x24;
2908  break;
2909  case TCM_CHECK_CONDITION_ABORT_CMD:
2910  /* CURRENT ERROR */
2911  buffer[0] = 0x70;
2912  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2913  /* ABORTED COMMAND */
2914  buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2915  /* BUS DEVICE RESET FUNCTION OCCURRED */
2916  buffer[SPC_ASC_KEY_OFFSET] = 0x29;
2917  buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
2918  break;
2919  case TCM_INCORRECT_AMOUNT_OF_DATA:
2920  /* CURRENT ERROR */
2921  buffer[0] = 0x70;
2922  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2923  /* ABORTED COMMAND */
2924  buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2925  /* WRITE ERROR */
2926  buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
2927  /* NOT ENOUGH UNSOLICITED DATA */
2928  buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d;
2929  break;
2930  case TCM_INVALID_CDB_FIELD:
2931  /* CURRENT ERROR */
2932  buffer[0] = 0x70;
2933  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2934  /* ILLEGAL REQUEST */
2935  buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2936  /* INVALID FIELD IN CDB */
2937  buffer[SPC_ASC_KEY_OFFSET] = 0x24;
2938  break;
2939  case TCM_INVALID_PARAMETER_LIST:
2940  /* CURRENT ERROR */
2941  buffer[0] = 0x70;
2942  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2943  /* ILLEGAL REQUEST */
2944  buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2945  /* INVALID FIELD IN PARAMETER LIST */
2946  buffer[SPC_ASC_KEY_OFFSET] = 0x26;
2947  break;
2948  case TCM_UNEXPECTED_UNSOLICITED_DATA:
2949  /* CURRENT ERROR */
2950  buffer[0] = 0x70;
2951  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2952  /* ABORTED COMMAND */
2953  buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2954  /* WRITE ERROR */
2955  buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
2956  /* UNEXPECTED_UNSOLICITED_DATA */
2957  buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c;
2958  break;
2959  case TCM_SERVICE_CRC_ERROR:
2960  /* CURRENT ERROR */
2961  buffer[0] = 0x70;
2962  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2963  /* ABORTED COMMAND */
2964  buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2965  /* PROTOCOL SERVICE CRC ERROR */
2966  buffer[SPC_ASC_KEY_OFFSET] = 0x47;
2967  /* N/A */
2968  buffer[SPC_ASCQ_KEY_OFFSET] = 0x05;
2969  break;
2970  case TCM_SNACK_REJECTED:
2971  /* CURRENT ERROR */
2972  buffer[0] = 0x70;
2973  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2974  /* ABORTED COMMAND */
2975  buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2976  /* READ ERROR */
2977  buffer[SPC_ASC_KEY_OFFSET] = 0x11;
2978  /* FAILED RETRANSMISSION REQUEST */
2979  buffer[SPC_ASCQ_KEY_OFFSET] = 0x13;
2980  break;
2981  case TCM_WRITE_PROTECTED:
2982  /* CURRENT ERROR */
2983  buffer[0] = 0x70;
2984  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2985  /* DATA PROTECT */
2986  buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
2987  /* WRITE PROTECTED */
2988  buffer[SPC_ASC_KEY_OFFSET] = 0x27;
2989  break;
2990  case TCM_ADDRESS_OUT_OF_RANGE:
2991  /* CURRENT ERROR */
2992  buffer[0] = 0x70;
2993  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2994  /* ILLEGAL REQUEST */
2995  buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2996  /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
2997  buffer[SPC_ASC_KEY_OFFSET] = 0x21;
2998  break;
2999  case TCM_CHECK_CONDITION_UNIT_ATTENTION:
3000  /* CURRENT ERROR */
3001  buffer[0] = 0x70;
3002  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
3003  /* UNIT ATTENTION */
3004  buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
3005  core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
3006  buffer[SPC_ASC_KEY_OFFSET] = asc;
3007  buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
3008  break;
3009  case TCM_CHECK_CONDITION_NOT_READY:
3010  /* CURRENT ERROR */
3011  buffer[0] = 0x70;
3012  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
3013  /* Not Ready */
3014  buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
3015  transport_get_sense_codes(cmd, &asc, &ascq);
3016  buffer[SPC_ASC_KEY_OFFSET] = asc;
3017  buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
3018  break;
3019  case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
3020  default:
3021  /* CURRENT ERROR */
3022  buffer[0] = 0x70;
3023  buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
3024  /* ILLEGAL REQUEST */
3025  buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3026  /* LOGICAL UNIT COMMUNICATION FAILURE */
3027  buffer[SPC_ASC_KEY_OFFSET] = 0x80;
3028  break;
3029  }
3030  /*
3031  * This code uses linux/include/scsi/scsi.h SAM status codes!
3032  */
3033  cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
3034  /*
3035  * Automatically padded, this value is encoded in the fabric's
3036  * data_length response PDU containing the SCSI defined sense data.
3037  */
3038  cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
3039 
3040 after_reason:
3041  return cmd->se_tfo->queue_status(cmd);
3042 }
3043 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
3044 
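/*
 * Editor's sketch: how the fixed-format sense data built above decodes on
 * the initiator side (SPC-3 fixed format).  Byte 0 carries the 0x70
 * response code, the low nibble of byte 2 the sense key, and bytes 12/13
 * the ASC/ASCQ pair, matching the SPC_*_OFFSET constants used in the
 * switch statement.  my_decode_fixed_sense is an invented name.
 */
static void my_decode_fixed_sense(const unsigned char *sense)
{
	unsigned char response = sense[0] & 0x7f;	/* 0x70: current error */
	unsigned char key      = sense[2] & 0x0f;	/* SPC_SENSE_KEY_OFFSET */
	unsigned char asc      = sense[12];		/* SPC_ASC_KEY_OFFSET */
	unsigned char ascq     = sense[13];		/* SPC_ASCQ_KEY_OFFSET */

	pr_debug("sense: resp 0x%02x key 0x%x asc 0x%02x ascq 0x%02x\n",
		 response, key, asc, ascq);
}
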
3045 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3046 {
3047  int ret = 0;
3048 
3049  if (cmd->transport_state & CMD_T_ABORTED) {
3050  if (!send_status ||
3051  (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
3052  return 1;
3053 
3054  pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
3055  " status for CDB: 0x%02x ITT: 0x%08x\n",
3056  cmd->t_task_cdb[0],
3057  cmd->se_tfo->get_task_tag(cmd));
3058 
3059  cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
3060  cmd->se_tfo->queue_status(cmd);
3061  ret = 1;
3062  }
3063  return ret;
3064 }
3065 EXPORT_SYMBOL(transport_check_aborted_status);
3066 
3067 void transport_send_task_abort(struct se_cmd *cmd)
3068 {
3069  unsigned long flags;
3070 
3071  spin_lock_irqsave(&cmd->t_state_lock, flags);
3072  if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
3073  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3074  return;
3075  }
3076  spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3077 
3078  /*
3079  * If there are still expected incoming fabric WRITEs, we wait
3080  * until they have completed before sending a TASK_ABORTED
3081  * response. This response with TASK_ABORTED status will be
3082  * queued back to fabric module by transport_check_aborted_status().
3083  */
3084  if (cmd->data_direction == DMA_TO_DEVICE) {
3085  if (cmd->se_tfo->write_pending_status(cmd) != 0) {
3086  cmd->transport_state |= CMD_T_ABORTED;
3087  smp_mb__after_atomic_inc();
3088  }
3089  }
3090  cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3091 
3092  pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
3093  " ITT: 0x%08x\n", cmd->t_task_cdb[0],
3094  cmd->se_tfo->get_task_tag(cmd));
3095 
3096  cmd->se_tfo->queue_status(cmd);
3097 }
3098 
3099 static void target_tmr_work(struct work_struct *work)
3100 {
3101  struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3102  struct se_device *dev = cmd->se_dev;
3103  struct se_tmr_req *tmr = cmd->se_tmr_req;
3104  int ret;
3105 
3106  switch (tmr->function) {
3107  case TMR_ABORT_TASK:
3108  core_tmr_abort_task(dev, tmr, cmd->se_sess);
3109  break;
3110  case TMR_ABORT_TASK_SET:
3111  case TMR_CLEAR_ACA:
3112  case TMR_CLEAR_TASK_SET:
3113  tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3114  break;
3115  case TMR_LUN_RESET:
3116  ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3117  tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3118  TMR_FUNCTION_REJECTED;
3119  break;
3120  case TMR_TARGET_WARM_RESET:
3121  tmr->response = TMR_FUNCTION_REJECTED;
3122  break;
3123  case TMR_TARGET_COLD_RESET:
3124  tmr->response = TMR_FUNCTION_REJECTED;
3125  break;
3126  default:
3127  pr_err("Unknown TMR function: 0x%02x.\n",
3128  tmr->function);
3129  tmr->response = TMR_FUNCTION_REJECTED;
3130  break;
3131  }
3132 
3133  cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3134  cmd->se_tfo->queue_tm_rsp(cmd);
3135 
3136  transport_cmd_check_stop_to_fabric(cmd);
3137 }
3138 
3139 int transport_generic_handle_tmr(
3140  struct se_cmd *cmd)
3141 {
3142  INIT_WORK(&cmd->work, target_tmr_work);
3143  queue_work(cmd->se_dev->tmr_wq, &cmd->work);
3144  return 0;
3145 }
3146 EXPORT_SYMBOL(transport_generic_handle_tmr);
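/*
 * Editor's sketch (hypothetical fabric code): handing a received task
 * management request to the function above.  cmd->se_tmr_req is assumed
 * to have been allocated beforehand (core_tmr_alloc_req() in this
 * kernel); target_tmr_work() then runs on the device's tmr_wq and
 * answers through ->queue_tm_rsp().
 */
static void my_fabric_submit_tmr(struct se_cmd *se_cmd)
{
	/* se_cmd->se_tmr_req->function holds TMR_ABORT_TASK,
	 * TMR_LUN_RESET, ... */
	transport_generic_handle_tmr(se_cmd);
}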