iscsi_target.c
1 /*******************************************************************************
2  * This file contains main functions related to the iSCSI Target Core Driver.
3  *
4  * (c) Copyright 2007-2011 RisingTide Systems LLC.
5  *
6  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7  *
8  * Author: Nicholas A. Bellinger <[email protected]>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18  * GNU General Public License for more details.
19  ******************************************************************************/
20 
21 #include <linux/string.h>
22 #include <linux/kthread.h>
23 #include <linux/crypto.h>
24 #include <linux/completion.h>
25 #include <linux/module.h>
26 #include <linux/idr.h>
27 #include <asm/unaligned.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/iscsi_proto.h>
30 #include <scsi/scsi_tcq.h>
31 #include <target/target_core_base.h>
32 #include <target/target_core_fabric.h>
33 #include <target/target_core_configfs.h>
34 
35 #include "iscsi_target_core.h"
36 #include "iscsi_target_parameters.h"
37 #include "iscsi_target_seq_pdu_list.h"
38 #include "iscsi_target_tq.h"
39 #include "iscsi_target_configfs.h"
40 #include "iscsi_target_datain_values.h"
41 #include "iscsi_target_erl0.h"
42 #include "iscsi_target_erl1.h"
43 #include "iscsi_target_erl2.h"
44 #include "iscsi_target_login.h"
45 #include "iscsi_target_tmr.h"
46 #include "iscsi_target_tpg.h"
47 #include "iscsi_target_util.h"
48 #include "iscsi_target.h"
49 #include "iscsi_target_device.h"
50 #include "iscsi_target_stat.h"
51 
52 static LIST_HEAD(g_tiqn_list);
53 static LIST_HEAD(g_np_list);
54 static DEFINE_SPINLOCK(tiqn_lock);
55 static DEFINE_SPINLOCK(np_lock);
56 
57 static struct idr tiqn_idr;
58 struct idr sess_idr;
59 struct mutex auth_id_lock;
60 spinlock_t sess_idr_lock;
61 
62 struct iscsit_global *iscsit_global;
63 
64 struct kmem_cache *lio_cmd_cache;
65 struct kmem_cache *lio_qr_cache;
66 struct kmem_cache *lio_dr_cache;
67 struct kmem_cache *lio_ooo_cache;
68 struct kmem_cache *lio_r2t_cache;
69 
70 static int iscsit_handle_immediate_data(struct iscsi_cmd *,
71  unsigned char *buf, u32);
72 static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
73 
74 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
75 {
76  struct iscsi_tiqn *tiqn = NULL;
77 
78  spin_lock(&tiqn_lock);
79  list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
80  if (!strcmp(tiqn->tiqn, buf)) {
81 
82  spin_lock(&tiqn->tiqn_state_lock);
83  if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
84  tiqn->tiqn_access_count++;
85  spin_unlock(&tiqn->tiqn_state_lock);
86  spin_unlock(&tiqn_lock);
87  return tiqn;
88  }
89  spin_unlock(&tiqn->tiqn_state_lock);
90  }
91  }
92  spin_unlock(&tiqn_lock);
93 
94  return NULL;
95 }
96 
97 static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
98 {
99  spin_lock(&tiqn->tiqn_state_lock);
100  if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
101  tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
102  spin_unlock(&tiqn->tiqn_state_lock);
103  return 0;
104  }
105  spin_unlock(&tiqn->tiqn_state_lock);
106 
107  return -1;
108 }
109 
110 void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
111 {
112  spin_lock(&tiqn->tiqn_state_lock);
113  tiqn->tiqn_access_count--;
114  spin_unlock(&tiqn->tiqn_state_lock);
115 }
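/*
 * Illustrative sketch only, not part of the driver source: the login path
 * is expected to pair these two helpers so that tiqn_access_count stays
 * balanced.  The caller context below is an assumption:
 *
 *	struct iscsi_tiqn *tiqn = iscsit_get_tiqn_for_login(buf);
 *	if (tiqn) {
 *		... negotiate the login against this target IQN ...
 *		iscsit_put_tiqn_for_login(tiqn);
 *	}
 */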
116 
117 /*
118  * Note that IQN formatting is expected to be done in userspace, and
119  * no explicit IQN format checks are done here.
120  */
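/*
 * For illustration only: a well-formed IQN produced by userspace typically
 * looks like "iqn.2003-01.org.linux-iscsi.hostname:sn.abcd1234", i.e.
 * "iqn.<yyyy-mm>.<reversed domain>[:<optional string>]" per RFC 3720.  The
 * example value is an assumption; the code below only bounds-checks length.
 */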
121 struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
122 {
123  struct iscsi_tiqn *tiqn = NULL;
124  int ret;
125 
126  if (strlen(buf) >= ISCSI_IQN_LEN) {
127  pr_err("Target IQN exceeds %d bytes\n",
128  ISCSI_IQN_LEN);
129  return ERR_PTR(-EINVAL);
130  }
131 
132  tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
133  if (!tiqn) {
134  pr_err("Unable to allocate struct iscsi_tiqn\n");
135  return ERR_PTR(-ENOMEM);
136  }
137 
138  sprintf(tiqn->tiqn, "%s", buf);
139  INIT_LIST_HEAD(&tiqn->tiqn_list);
140  INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
141  spin_lock_init(&tiqn->tiqn_state_lock);
142  spin_lock_init(&tiqn->tiqn_tpg_lock);
143  spin_lock_init(&tiqn->sess_err_stats.lock);
144  spin_lock_init(&tiqn->login_stats.lock);
145  spin_lock_init(&tiqn->logout_stats.lock);
146 
147  if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
148  pr_err("idr_pre_get() for tiqn_idr failed\n");
149  kfree(tiqn);
150  return ERR_PTR(-ENOMEM);
151  }
152  tiqn->tiqn_state = TIQN_STATE_ACTIVE;
153 
154  spin_lock(&tiqn_lock);
155  ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
156  if (ret < 0) {
157  pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
158  spin_unlock(&tiqn_lock);
159  kfree(tiqn);
160  return ERR_PTR(ret);
161  }
162  list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
163  spin_unlock(&tiqn_lock);
164 
165  pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
166 
167  return tiqn;
168 
169 }
170 
171 static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
172 {
173  /*
174  * Wait for accesses to said struct iscsi_tiqn to end.
175  */
176  spin_lock(&tiqn->tiqn_state_lock);
177  while (tiqn->tiqn_access_count != 0) {
178  spin_unlock(&tiqn->tiqn_state_lock);
179  msleep(10);
180  spin_lock(&tiqn->tiqn_state_lock);
181  }
182  spin_unlock(&tiqn->tiqn_state_lock);
183 }
184 
185 void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
186 {
187  /*
188  * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
189  * while holding tiqn->tiqn_state_lock. This means that all subsequent
190  * attempts to access this struct iscsi_tiqn will fail from both transport
191  * fabric and control code paths.
192  */
193  if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
194  pr_err("iscsit_set_tiqn_shutdown() failed\n");
195  return;
196  }
197 
198  iscsit_wait_for_tiqn(tiqn);
199 
200  spin_lock(&tiqn_lock);
201  list_del(&tiqn->tiqn_list);
202  idr_remove(&tiqn_idr, tiqn->tiqn_index);
203  spin_unlock(&tiqn_lock);
204 
205  pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
206  tiqn->tiqn);
207  kfree(tiqn);
208 }
209 
210 int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
211 {
212  int ret;
213  /*
214  * Determine if the network portal is accepting storage traffic.
215  */
216  spin_lock_bh(&np->np_thread_lock);
217  if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
218  spin_unlock_bh(&np->np_thread_lock);
219  return -1;
220  }
221  if (np->np_login_tpg) {
222  pr_err("np->np_login_tpg() is not NULL!\n");
223  spin_unlock_bh(&np->np_thread_lock);
224  return -1;
225  }
226  spin_unlock_bh(&np->np_thread_lock);
227  /*
228  * Determine if the portal group is accepting storage traffic.
229  */
230  spin_lock_bh(&tpg->tpg_state_lock);
231  if (tpg->tpg_state != TPG_STATE_ACTIVE) {
232  spin_unlock_bh(&tpg->tpg_state_lock);
233  return -1;
234  }
235  spin_unlock_bh(&tpg->tpg_state_lock);
236 
237  /*
238  * Here we serialize access across the TIQN+TPG Tuple.
239  */
240  ret = down_interruptible(&tpg->np_login_sem);
241  if ((ret != 0) || signal_pending(current))
242  return -1;
243 
244  spin_lock_bh(&np->np_thread_lock);
245  np->np_login_tpg = tpg;
246  spin_unlock_bh(&np->np_thread_lock);
247 
248  return 0;
249 }
250 
251 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
252 {
253  struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
254 
255  spin_lock_bh(&np->np_thread_lock);
256  np->np_login_tpg = NULL;
257  spin_unlock_bh(&np->np_thread_lock);
258 
259  up(&tpg->np_login_sem);
260 
261  if (tiqn)
262  iscsit_put_tiqn_for_login(tiqn);
263 
264  return 0;
265 }
266 
267 static struct iscsi_np *iscsit_get_np(
268  struct __kernel_sockaddr_storage *sockaddr,
269  int network_transport)
270 {
271  struct sockaddr_in *sock_in, *sock_in_e;
272  struct sockaddr_in6 *sock_in6, *sock_in6_e;
273  struct iscsi_np *np;
274  int ip_match = 0;
275  u16 port;
276 
277  spin_lock_bh(&np_lock);
278  list_for_each_entry(np, &g_np_list, np_list) {
279  spin_lock(&np->np_thread_lock);
280  if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
281  spin_unlock(&np->np_thread_lock);
282  continue;
283  }
284 
285  if (sockaddr->ss_family == AF_INET6) {
286  sock_in6 = (struct sockaddr_in6 *)sockaddr;
287  sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
288 
289  if (!memcmp(&sock_in6->sin6_addr.in6_u,
290  &sock_in6_e->sin6_addr.in6_u,
291  sizeof(struct in6_addr)))
292  ip_match = 1;
293 
294  port = ntohs(sock_in6->sin6_port);
295  } else {
296  sock_in = (struct sockaddr_in *)sockaddr;
297  sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
298 
299  if (sock_in->sin_addr.s_addr ==
300  sock_in_e->sin_addr.s_addr)
301  ip_match = 1;
302 
303  port = ntohs(sock_in->sin_port);
304  }
305 
306  if ((ip_match == 1) && (np->np_port == port) &&
307  (np->np_network_transport == network_transport)) {
308  /*
309  * Increment the np_exports reference count now to
310  * prevent iscsit_del_np() below from being called
311  * while iscsi_tpg_add_network_portal() is called.
312  */
313  np->np_exports++;
314  spin_unlock(&np->np_thread_lock);
315  spin_unlock_bh(&np_lock);
316  return np;
317  }
318  spin_unlock(&np->np_thread_lock);
319  }
320  spin_unlock_bh(&np_lock);
321 
322  return NULL;
323 }
324 
325 struct iscsi_np *iscsit_add_np(
326  struct __kernel_sockaddr_storage *sockaddr,
327  char *ip_str,
328  int network_transport)
329 {
330  struct sockaddr_in *sock_in;
331  struct sockaddr_in6 *sock_in6;
332  struct iscsi_np *np;
333  int ret;
334  /*
335  * Locate the existing struct iscsi_np if already active..
336  */
337  np = iscsit_get_np(sockaddr, network_transport);
338  if (np)
339  return np;
340 
341  np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
342  if (!np) {
343  pr_err("Unable to allocate memory for struct iscsi_np\n");
344  return ERR_PTR(-ENOMEM);
345  }
346 
347  np->np_flags |= NPF_IP_NETWORK;
348  if (sockaddr->ss_family == AF_INET6) {
349  sock_in6 = (struct sockaddr_in6 *)sockaddr;
350  snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
351  np->np_port = ntohs(sock_in6->sin6_port);
352  } else {
353  sock_in = (struct sockaddr_in *)sockaddr;
354  sprintf(np->np_ip, "%s", ip_str);
355  np->np_port = ntohs(sock_in->sin_port);
356  }
357 
358  np->np_network_transport = network_transport;
359  spin_lock_init(&np->np_thread_lock);
360  init_completion(&np->np_restart_comp);
361  INIT_LIST_HEAD(&np->np_list);
362 
363  ret = iscsi_target_setup_login_socket(np, sockaddr);
364  if (ret != 0) {
365  kfree(np);
366  return ERR_PTR(ret);
367  }
368 
369  np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
370  if (IS_ERR(np->np_thread)) {
371  pr_err("Unable to create kthread: iscsi_np\n");
372  ret = PTR_ERR(np->np_thread);
373  kfree(np);
374  return ERR_PTR(ret);
375  }
376  /*
377  * Increment the np_exports reference count now to prevent
378  * iscsit_del_np() below from being run while a new call to
379  * iscsi_tpg_add_network_portal() for a matching iscsi_np is
380  * active. We don't need to hold np->np_thread_lock at this
381  * point because iscsi_np has not been added to g_np_list yet.
382  */
383  np->np_exports = 1;
384 
385  spin_lock_bh(&np_lock);
386  list_add_tail(&np->np_list, &g_np_list);
387  spin_unlock_bh(&np_lock);
388 
389  pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
390  np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
391  "TCP" : "SCTP");
392 
393  return np;
394 }
395 
396 int iscsit_reset_np_thread(
397  struct iscsi_np *np,
398  struct iscsi_tpg_np *tpg_np,
399  struct iscsi_portal_group *tpg)
400 {
401  spin_lock_bh(&np->np_thread_lock);
402  if (tpg && tpg_np) {
403  /*
404  * The reset operation need only be performed when the
405  * passed struct iscsi_portal_group has a login in progress
406  * to one of the network portals.
407  */
408  if (tpg_np->tpg_np->np_login_tpg != tpg) {
409  spin_unlock_bh(&np->np_thread_lock);
410  return 0;
411  }
412  }
413  if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
414  spin_unlock_bh(&np->np_thread_lock);
415  return 0;
416  }
417  np->np_thread_state = ISCSI_NP_THREAD_RESET;
418 
419  if (np->np_thread) {
420  spin_unlock_bh(&np->np_thread_lock);
421  send_sig(SIGINT, np->np_thread, 1);
422  wait_for_completion(&np->np_restart_comp);
423  spin_lock_bh(&np->np_thread_lock);
424  }
425  spin_unlock_bh(&np->np_thread_lock);
426 
427  return 0;
428 }
429 
430 static int iscsit_del_np_comm(struct iscsi_np *np)
431 {
432  if (np->np_socket)
433  sock_release(np->np_socket);
434  return 0;
435 }
436 
437 int iscsit_del_np(struct iscsi_np *np)
438 {
439  spin_lock_bh(&np->np_thread_lock);
440  np->np_exports--;
441  if (np->np_exports) {
442  spin_unlock_bh(&np->np_thread_lock);
443  return 0;
444  }
445  np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
446  spin_unlock_bh(&np->np_thread_lock);
447 
448  if (np->np_thread) {
449  /*
450  * We need to send the signal to wakeup Linux/Net
451  * which may be sleeping in sock_accept()..
452  */
453  send_sig(SIGINT, np->np_thread, 1);
454  kthread_stop(np->np_thread);
455  }
456  iscsit_del_np_comm(np);
457 
458  spin_lock_bh(&np_lock);
459  list_del(&np->np_list);
460  spin_unlock_bh(&np_lock);
461 
462  pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
463  np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
464  "TCP" : "SCTP");
465 
466  kfree(np);
467  return 0;
468 }
469 
470 static int __init iscsi_target_init_module(void)
471 {
472  int ret = 0;
473 
474  pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
475 
476  iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
477  if (!iscsit_global) {
478  pr_err("Unable to allocate memory for iscsit_global\n");
479  return -1;
480  }
481  mutex_init(&auth_id_lock);
482  spin_lock_init(&sess_idr_lock);
483  idr_init(&tiqn_idr);
484  idr_init(&sess_idr);
485 
486  ret = iscsi_target_register_configfs();
487  if (ret < 0)
488  goto out;
489 
490  ret = iscsi_thread_set_init();
491  if (ret < 0)
492  goto configfs_out;
493 
494  if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
495  TARGET_THREAD_SET_COUNT) {
496  pr_err("iscsi_allocate_thread_sets() returned"
497  " unexpected value!\n");
498  goto ts_out1;
499  }
500 
501  lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
502  sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
503  0, NULL);
504  if (!lio_cmd_cache) {
505  pr_err("Unable to kmem_cache_create() for"
506  " lio_cmd_cache\n");
507  goto ts_out2;
508  }
509 
510  lio_qr_cache = kmem_cache_create("lio_qr_cache",
511  sizeof(struct iscsi_queue_req),
512  __alignof__(struct iscsi_queue_req), 0, NULL);
513  if (!lio_qr_cache) {
514  pr_err("Unable to kmem_cache_create() for"
515  " lio_qr_cache\n");
516  goto cmd_out;
517  }
518 
519  lio_dr_cache = kmem_cache_create("lio_dr_cache",
520  sizeof(struct iscsi_datain_req),
521  __alignof__(struct iscsi_datain_req), 0, NULL);
522  if (!lio_dr_cache) {
523  pr_err("Unable to kmem_cache_create() for"
524  " lio_dr_cache\n");
525  goto qr_out;
526  }
527 
528  lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
529  sizeof(struct iscsi_ooo_cmdsn),
530  __alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
531  if (!lio_ooo_cache) {
532  pr_err("Unable to kmem_cache_create() for"
533  " lio_ooo_cache\n");
534  goto dr_out;
535  }
536 
537  lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
538  sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
539  0, NULL);
540  if (!lio_r2t_cache) {
541  pr_err("Unable to kmem_cache_create() for"
542  " lio_r2t_cache\n");
543  goto ooo_out;
544  }
545 
546  if (iscsit_load_discovery_tpg() < 0)
547  goto r2t_out;
548 
549  return ret;
550 r2t_out:
551  kmem_cache_destroy(lio_r2t_cache);
552 ooo_out:
553  kmem_cache_destroy(lio_ooo_cache);
554 dr_out:
555  kmem_cache_destroy(lio_dr_cache);
556 qr_out:
557  kmem_cache_destroy(lio_qr_cache);
558 cmd_out:
559  kmem_cache_destroy(lio_cmd_cache);
560 ts_out2:
561  iscsi_deallocate_thread_sets();
562 ts_out1:
563  iscsi_thread_set_free();
564 configfs_out:
565  iscsi_target_deregister_configfs();
566 out:
567  kfree(iscsit_global);
568  return -ENOMEM;
569 }
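/*
 * Note on the error unwind above: each label releases only what was set up
 * before the failing step (kmem caches, thread sets, configfs registration,
 * and finally the iscsit_global allocation), so the gotos run in reverse
 * order of initialization, following the usual kernel init/goto-unwind
 * pattern.
 */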
570 
571 static void __exit iscsi_target_cleanup_module(void)
572 {
573  iscsi_deallocate_thread_sets();
574  iscsi_thread_set_free();
575  iscsit_release_discovery_tpg();
576  kmem_cache_destroy(lio_cmd_cache);
577  kmem_cache_destroy(lio_qr_cache);
578  kmem_cache_destroy(lio_dr_cache);
579  kmem_cache_destroy(lio_ooo_cache);
580  kmem_cache_destroy(lio_r2t_cache);
581 
582  iscsi_target_deregister_configfs();
583 
584  kfree(iscsit_global);
585 }
586 
587 static int iscsit_add_reject(
588  u8 reason,
589  int fail_conn,
590  unsigned char *buf,
591  struct iscsi_conn *conn)
592 {
593  struct iscsi_cmd *cmd;
594  struct iscsi_reject *hdr;
595  int ret;
596 
597  cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
598  if (!cmd)
599  return -1;
600 
601  cmd->iscsi_opcode = ISCSI_OP_REJECT;
602  if (fail_conn)
603  cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
604 
605  hdr = (struct iscsi_reject *) cmd->pdu;
606  hdr->reason = reason;
607 
608  cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
609  if (!cmd->buf_ptr) {
610  pr_err("Unable to allocate memory for cmd->buf_ptr\n");
611  iscsit_release_cmd(cmd);
612  return -1;
613  }
614 
615  spin_lock_bh(&conn->cmd_lock);
616  list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
617  spin_unlock_bh(&conn->cmd_lock);
618 
619  cmd->i_state = ISTATE_SEND_REJECT;
620  iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
621 
622  ret = wait_for_completion_interruptible(&cmd->reject_comp);
623  if (ret != 0)
624  return -1;
625 
626  return (!fail_conn) ? 0 : -1;
627 }
628 
629 int iscsit_add_reject_from_cmd(
630  u8 reason,
631  int fail_conn,
632  int add_to_conn,
633  unsigned char *buf,
634  struct iscsi_cmd *cmd)
635 {
636  struct iscsi_conn *conn;
637  struct iscsi_reject *hdr;
638  int ret;
639 
640  if (!cmd->conn) {
641  pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
642  cmd->init_task_tag);
643  return -1;
644  }
645  conn = cmd->conn;
646 
647  cmd->iscsi_opcode = ISCSI_OP_REJECT;
648  if (fail_conn)
649  cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
650 
651  hdr = (struct iscsi_reject *) cmd->pdu;
652  hdr->reason = reason;
653 
654  cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
655  if (!cmd->buf_ptr) {
656  pr_err("Unable to allocate memory for cmd->buf_ptr\n");
657  iscsit_release_cmd(cmd);
658  return -1;
659  }
660 
661  if (add_to_conn) {
662  spin_lock_bh(&conn->cmd_lock);
663  list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
664  spin_unlock_bh(&conn->cmd_lock);
665  }
666 
667  cmd->i_state = ISTATE_SEND_REJECT;
668  iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
669 
670  ret = wait_for_completion_interruptible(&cmd->reject_comp);
671  if (ret != 0)
672  return -1;
673 
674  return (!fail_conn) ? 0 : -1;
675 }
676 
677 /*
678  * Map some portion of the allocated scatterlist to an iovec, suitable for
679  * kernel sockets to copy data in/out.
680  */
681 static int iscsit_map_iovec(
682  struct iscsi_cmd *cmd,
683  struct kvec *iov,
684  u32 data_offset,
685  u32 data_length)
686 {
687  u32 i = 0;
688  struct scatterlist *sg;
689  unsigned int page_off;
690 
691  /*
692  * We know each entry in t_data_sg contains a page.
693  */
694  sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
695  page_off = (data_offset % PAGE_SIZE);
696 
697  cmd->first_data_sg = sg;
698  cmd->first_data_sg_off = page_off;
699 
700  while (data_length) {
701  u32 cur_len = min_t(u32, data_length, sg->length - page_off);
702 
703  iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
704  iov[i].iov_len = cur_len;
705 
706  data_length -= cur_len;
707  page_off = 0;
708  sg = sg_next(sg);
709  i++;
710  }
711 
712  cmd->kmapped_nents = i;
713 
714  return i;
715 }
716 
717 static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
718 {
719  u32 i;
720  struct scatterlist *sg;
721 
722  sg = cmd->first_data_sg;
723 
724  for (i = 0; i < cmd->kmapped_nents; i++)
725  kunmap(sg_page(&sg[i]));
726 }
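/*
 * Illustrative sketch only, not part of the driver source: callers such as
 * iscsit_handle_data_out() below are expected to bracket the socket receive
 * with these two helpers, roughly:
 *
 *	iov_ret = iscsit_map_iovec(cmd, iov, offset, length);
 *	rx_got = rx_data(conn, iov, iov_count, rx_size);
 *	iscsit_unmap_iovec(cmd);
 *
 * so that every kmap() taken in iscsit_map_iovec() is dropped again via
 * kunmap() once the payload has been copied.
 */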
727 
728 static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
729 {
730  struct iscsi_cmd *cmd;
731 
732  conn->exp_statsn = exp_statsn;
733 
734  spin_lock_bh(&conn->cmd_lock);
735  list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
736  spin_lock(&cmd->istate_lock);
737  if ((cmd->i_state == ISTATE_SENT_STATUS) &&
738  (cmd->stat_sn < exp_statsn)) {
739  cmd->i_state = ISTATE_REMOVE;
740  spin_unlock(&cmd->istate_lock);
741  iscsit_add_cmd_to_immediate_queue(cmd, conn,
742  cmd->i_state);
743  continue;
744  }
745  spin_unlock(&cmd->istate_lock);
746  }
747  spin_unlock_bh(&conn->cmd_lock);
748 }
749 
750 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
751 {
752  u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
753 
754  iov_count += ISCSI_IOV_DATA_BUFFER;
755 
756  cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
757  if (!cmd->iov_data) {
758  pr_err("Unable to allocate cmd->iov_data\n");
759  return -ENOMEM;
760  }
761 
762  cmd->orig_iov_data_count = iov_count;
763  return 0;
764 }
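/*
 * The extra ISCSI_IOV_DATA_BUFFER entries reserved above are assumed to
 * cover the non-payload vectors a DataOUT receive may append, e.g. the
 * 4-byte pad bytes and the CRC32C DataDigest slots added in
 * iscsit_handle_data_out() below.
 */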
765 
766 static int iscsit_handle_scsi_cmd(
767  struct iscsi_conn *conn,
768  unsigned char *buf)
769 {
770  int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
771  int dump_immediate_data = 0, send_check_condition = 0, payload_length;
772  struct iscsi_cmd *cmd = NULL;
773  struct iscsi_scsi_req *hdr;
774  int iscsi_task_attr;
775  int sam_task_attr;
776 
777  spin_lock_bh(&conn->sess->session_stats_lock);
778  conn->sess->cmd_pdus++;
779  if (conn->sess->se_sess->se_node_acl) {
780  spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
781  conn->sess->se_sess->se_node_acl->num_cmds++;
782  spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
783  }
784  spin_unlock_bh(&conn->sess->session_stats_lock);
785 
786  hdr = (struct iscsi_scsi_req *) buf;
787  payload_length = ntoh24(hdr->dlength);
788 
789  /* FIXME: Add checks for AdditionalHeaderSegment */
790 
791  if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
792  !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
793  pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
794  " not set. Bad iSCSI Initiator.\n");
795  return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
796  buf, conn);
797  }
798 
799  if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
800  (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
801  /*
802  * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
803  * that adds support for RESERVE/RELEASE. There is a bug
804  * introduced with this new functionality that sets the R/W bits when
805  * the CDB carries no READ or WRITE data payload.
806  */
807  if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
808  hdr->flags &= ~ISCSI_FLAG_CMD_READ;
809  hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
810  goto done;
811  }
812 
813  pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
814  " set when Expected Data Transfer Length is 0 for"
815  " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
816  return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
817  buf, conn);
818  }
819 done:
820 
821  if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
822  !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
823  pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
824  " MUST be set if Expected Data Transfer Length is not 0."
825  " Bad iSCSI Initiator\n");
826  return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
827  buf, conn);
828  }
829 
830  if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
831  (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
832  pr_err("Bidirectional operations not supported!\n");
833  return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
834  buf, conn);
835  }
836 
837  if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
838  pr_err("Illegally set Immediate Bit in iSCSI Initiator"
839  " Scsi Command PDU.\n");
840  return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
841  buf, conn);
842  }
843 
844  if (payload_length && !conn->sess->sess_ops->ImmediateData) {
845  pr_err("ImmediateData=No but DataSegmentLength=%u,"
846  " protocol error.\n", payload_length);
847  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
848  buf, conn);
849  }
850 
851  if ((be32_to_cpu(hdr->data_length) == payload_length) &&
852  (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
853  pr_err("Expected Data Transfer Length and Length of"
854  " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
855  " bit is not set protocol error\n");
856  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
857  buf, conn);
858  }
859 
860  if (payload_length > be32_to_cpu(hdr->data_length)) {
861  pr_err("DataSegmentLength: %u is greater than"
862  " EDTL: %u, protocol error.\n", payload_length,
863  hdr->data_length);
864  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
865  buf, conn);
866  }
867 
868  if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
869  pr_err("DataSegmentLength: %u is greater than"
870  " MaxXmitDataSegmentLength: %u, protocol error.\n",
871  payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
872  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
873  buf, conn);
874  }
875 
876  if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
877  pr_err("DataSegmentLength: %u is greater than"
878  " FirstBurstLength: %u, protocol error.\n",
879  payload_length, conn->sess->sess_ops->FirstBurstLength);
880  return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
881  buf, conn);
882  }
883 
884  data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
885  (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
886  DMA_NONE;
887 
888  cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
889  if (!cmd)
890  return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
891  buf, conn);
892 
893  cmd->data_direction = data_direction;
894  iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
895  /*
896  * Figure out the SAM Task Attribute for the incoming SCSI CDB
897  */
898  if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
899  (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
900  sam_task_attr = MSG_SIMPLE_TAG;
901  else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
902  sam_task_attr = MSG_ORDERED_TAG;
903  else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
904  sam_task_attr = MSG_HEAD_TAG;
905  else if (iscsi_task_attr == ISCSI_ATTR_ACA)
906  sam_task_attr = MSG_ACA_TAG;
907  else {
908  pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
909  " MSG_SIMPLE_TAG\n", iscsi_task_attr);
910  sam_task_attr = MSG_SIMPLE_TAG;
911  }
912 
913  cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
914  cmd->i_state = ISTATE_NEW_CMD;
915  cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
916  cmd->immediate_data = (payload_length) ? 1 : 0;
917  cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
918  (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
919  if (cmd->unsolicited_data)
920  cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
921 
922  conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
923  if (hdr->flags & ISCSI_FLAG_CMD_READ) {
924  spin_lock_bh(&conn->sess->ttt_lock);
925  cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
926  if (cmd->targ_xfer_tag == 0xFFFFFFFF)
927  cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
928  spin_unlock_bh(&conn->sess->ttt_lock);
929  } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
930  cmd->targ_xfer_tag = 0xFFFFFFFF;
931  cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
932  cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
934 
935  if (cmd->data_direction == DMA_FROM_DEVICE) {
936  struct iscsi_datain_req *dr;
937 
938  dr = iscsit_allocate_datain_req();
939  if (!dr)
940  return iscsit_add_reject_from_cmd(
941  ISCSI_REASON_BOOKMARK_NO_RESOURCES,
942  1, 1, buf, cmd);
943 
944  iscsit_attach_datain_req(cmd, dr);
945  }
946 
947  /*
948  * Initialize struct se_cmd descriptor from target_core_mod infrastructure
949  */
950  transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops,
951  conn->sess->se_sess, be32_to_cpu(hdr->data_length),
952  cmd->data_direction, sam_task_attr,
953  cmd->sense_buffer + 2);
954 
955  pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
956  " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
957  hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
958 
959  /*
960  * The CDB is going to an se_device_t.
961  */
962  ret = transport_lookup_cmd_lun(&cmd->se_cmd,
963  scsilun_to_int(&hdr->lun));
964  if (ret < 0) {
965  if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
966  pr_debug("Responding to non-acl'ed,"
967  " non-existent or non-exported iSCSI LUN:"
968  " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
969  }
970  send_check_condition = 1;
971  goto attach_cmd;
972  }
973 
974  transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
975  if (transport_ret == -ENOMEM) {
978  1, 1, buf, cmd);
979  } else if (transport_ret < 0) {
980  /*
981  * Unsupported SAM Opcode. CHECK_CONDITION will be sent
982  * in iscsit_execute_cmd() during the CmdSN OOO Execution
983  * Mechanism.
984  */
985  send_check_condition = 1;
986  } else {
990  1, 1, buf, cmd);
991  }
992 
993 attach_cmd:
994  spin_lock_bh(&conn->cmd_lock);
995  list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
996  spin_unlock_bh(&conn->cmd_lock);
997  /*
998  * Check if we need to delay processing because of ALUA
999  * Active/NonOptimized primary access state..
1000  */
1001  core_alua_check_nonop_delay(&cmd->se_cmd);
1002 
1003  ret = iscsit_allocate_iovecs(cmd);
1004  if (ret < 0)
1007  1, 0, buf, cmd);
1008  /*
1009  * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1010  * the Immediate Bit is not set, and no Immediate
1011  * Data is attached.
1012  *
1013  * A PDU/CmdSN carrying Immediate Data can only
1014  * be processed after the DataCRC has passed.
1015  * If the DataCRC fails, the CmdSN MUST NOT
1016  * be acknowledged. (See below)
1017  */
1018  if (!cmd->immediate_data) {
1019  cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1020  if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1021  return 0;
1022  else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1025  1, 0, buf, cmd);
1026  }
1027 
1028  iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1029 
1030  /*
1031  * If no Immediate Data is attached, it's OK to return now.
1032  */
1033  if (!cmd->immediate_data) {
1034  if (send_check_condition)
1035  return 0;
1036 
1037  if (cmd->unsolicited_data) {
1039 
1040  spin_lock_bh(&cmd->dataout_timeout_lock);
1041  iscsit_start_dataout_timer(cmd, cmd->conn);
1042  spin_unlock_bh(&cmd->dataout_timeout_lock);
1043  }
1044 
1045  return 0;
1046  }
1047 
1048  /*
1049  * Early CHECK_CONDITIONs never make it to the transport processing
1050  * thread. They are processed in CmdSN order by
1051  * iscsit_check_received_cmdsn() below.
1052  */
1053  if (send_check_condition) {
1054  immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1055  dump_immediate_data = 1;
1056  goto after_immediate_data;
1057  }
1058  /*
1059  * Call directly into transport_generic_new_cmd() to perform
1060  * the backend memory allocation.
1061  */
1062  ret = transport_generic_new_cmd(&cmd->se_cmd);
1063  if (ret < 0) {
1064  immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1065  dump_immediate_data = 1;
1066  goto after_immediate_data;
1067  }
1068 
1069  immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length);
1070 after_immediate_data:
1071  if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
1072  /*
1073  * A PDU/CmdSN carrying Immediate Data passed
1074  * DataCRC, check against ExpCmdSN/MaxCmdSN if
1075  * Immediate Bit is not set.
1076  */
1077  cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1078  /*
1079  * Special case for Unsupported SAM WRITE Opcodes
1080  * and ImmediateData=Yes.
1081  */
1082  if (dump_immediate_data) {
1083  if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
1084  return -1;
1085  } else if (cmd->unsolicited_data) {
1087 
1088  spin_lock_bh(&cmd->dataout_timeout_lock);
1089  iscsit_start_dataout_timer(cmd, cmd->conn);
1090  spin_unlock_bh(&cmd->dataout_timeout_lock);
1091  }
1092 
1093  if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1096  1, 0, buf, cmd);
1097 
1098  } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
1099  /*
1100  * Immediate Data failed DataCRC and ERL>=1,
1101  * silently drop this PDU and let the initiator
1102  * plug the CmdSN gap.
1103  *
1104  * FIXME: Send Unsolicited NOPIN with reserved
1105  * TTT here to help the initiator figure out
1106  * the missing CmdSN, although they should be
1107  * intelligent enough to determine the missing
1108  * CmdSN and issue a retry to plug the sequence.
1109  */
1110  cmd->i_state = ISTATE_REMOVE;
1111  iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
1112  } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
1113  return -1;
1114 
1115  return 0;
1116 }
1117 
1118 static u32 iscsit_do_crypto_hash_sg(
1119  struct hash_desc *hash,
1120  struct iscsi_cmd *cmd,
1121  u32 data_offset,
1122  u32 data_length,
1123  u32 padding,
1124  u8 *pad_bytes)
1125 {
1126  u32 data_crc;
1127  u32 i;
1128  struct scatterlist *sg;
1129  unsigned int page_off;
1130 
1131  crypto_hash_init(hash);
1132 
1133  sg = cmd->first_data_sg;
1134  page_off = cmd->first_data_sg_off;
1135 
1136  i = 0;
1137  while (data_length) {
1138  u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
1139 
1140  crypto_hash_update(hash, &sg[i], cur_len);
1141 
1142  data_length -= cur_len;
1143  page_off = 0;
1144  i++;
1145  }
1146 
1147  if (padding) {
1148  struct scatterlist pad_sg;
1149 
1150  sg_init_one(&pad_sg, pad_bytes, padding);
1151  crypto_hash_update(hash, &pad_sg, padding);
1152  }
1153  crypto_hash_final(hash, (u8 *) &data_crc);
1154 
1155  return data_crc;
1156 }
1157 
1158 static void iscsit_do_crypto_hash_buf(
1159  struct hash_desc *hash,
1160  unsigned char *buf,
1162  u32 padding,
1163  u8 *pad_bytes,
1164  u8 *data_crc)
1165 {
1166  struct scatterlist sg;
1167 
1168  crypto_hash_init(hash);
1169 
1170  sg_init_one(&sg, buf, payload_length);
1171  crypto_hash_update(hash, &sg, payload_length);
1172 
1173  if (padding) {
1174  sg_init_one(&sg, pad_bytes, padding);
1175  crypto_hash_update(hash, &sg, padding);
1176  }
1177  crypto_hash_final(hash, data_crc);
1178 }
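/*
 * Both hash helpers above produce the CRC32C DataDigest defined by RFC 3720:
 * the digest is computed over the received payload plus any pad bytes and
 * then compared against the 4-byte checksum appended by the initiator.  The
 * flat-buffer variant is used for small control payloads (NOPOUT ping data,
 * Text parameters), while the scatterlist variant walks the command's data
 * scatterlist for DataOUT payloads.
 */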
1179 
1180 static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1181 {
1182  int iov_ret, ooo_cmdsn = 0, ret;
1183  u8 data_crc_failed = 0;
1184  u32 checksum, iov_count = 0, padding = 0, rx_got = 0;
1185  u32 rx_size = 0, payload_length;
1186  struct iscsi_cmd *cmd = NULL;
1187  struct se_cmd *se_cmd;
1188  struct iscsi_data *hdr;
1189  struct kvec *iov;
1190  unsigned long flags;
1191 
1192  hdr = (struct iscsi_data *) buf;
1193  payload_length = ntoh24(hdr->dlength);
1194 
1195  if (!payload_length) {
1196  pr_err("DataOUT payload is ZERO, protocol error.\n");
1197  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1198  buf, conn);
1199  }
1200 
1201  /* iSCSI write */
1202  spin_lock_bh(&conn->sess->session_stats_lock);
1203  conn->sess->rx_data_octets += payload_length;
1204  if (conn->sess->se_sess->se_node_acl) {
1205  spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
1206  conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
1207  spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
1208  }
1209  spin_unlock_bh(&conn->sess->session_stats_lock);
1210 
1211  if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1212  pr_err("DataSegmentLength: %u is greater than"
1213  " MaxXmitDataSegmentLength: %u\n", payload_length,
1214  conn->conn_ops->MaxXmitDataSegmentLength);
1215  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1216  buf, conn);
1217  }
1218 
1219  cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
1220  payload_length);
1221  if (!cmd)
1222  return 0;
1223 
1224  pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
1225  " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
1226  hdr->itt, hdr->ttt, hdr->datasn, hdr->offset,
1227  payload_length, conn->cid);
1228 
1229  if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
1230  pr_err("Command ITT: 0x%08x received DataOUT after"
1231  " last DataOUT received, dumping payload\n",
1232  cmd->init_task_tag);
1233  return iscsit_dump_data_payload(conn, payload_length, 1);
1234  }
1235 
1236  if (cmd->data_direction != DMA_TO_DEVICE) {
1237  pr_err("Command ITT: 0x%08x received DataOUT for a"
1238  " NON-WRITE command.\n", cmd->init_task_tag);
1240  1, 0, buf, cmd);
1241  }
1242  se_cmd = &cmd->se_cmd;
1244 
1245  if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
1246  pr_err("DataOut Offset: %u, Length %u greater than"
1247  " iSCSI Command EDTL %u, protocol error.\n",
1248  hdr->offset, payload_length, cmd->se_cmd.data_length);
1250  1, 0, buf, cmd);
1251  }
1252 
1253  if (cmd->unsolicited_data) {
1254  int dump_unsolicited_data = 0;
1255 
1256  if (conn->sess->sess_ops->InitialR2T) {
1257  pr_err("Received unexpected unsolicited data"
1258  " while InitialR2T=Yes, protocol error.\n");
1261  return -1;
1262  }
1263  /*
1264  * Special case for dealing with Unsolicited DataOUT
1265  * and Unsupported SAM WRITE Opcodes and SE resource allocation
1266  * failures;
1267  */
1268 
1269  /* Something's amiss if we're not in WRITE_PENDING state... */
1270  spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1272  spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1273 
1274  spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1275  if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
1277  dump_unsolicited_data = 1;
1278  spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1279 
1280  if (dump_unsolicited_data) {
1281  /*
1282  * Check if a delayed TASK_ABORTED status needs to
1283  * be sent now if the ISCSI_FLAG_CMD_FINAL has been
1284  * received with the unsolicited data out.
1285  */
1286  if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1288 
1290  (hdr->flags & ISCSI_FLAG_CMD_FINAL));
1291  return iscsit_dump_data_payload(conn, payload_length, 1);
1292  }
1293  } else {
1294  /*
1295  * For the normal solicited data path:
1296  *
1297  * Check for a delayed TASK_ABORTED status and dump any
1298  * incoming data out payload if one exists. Also, when the
1299  * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
1300  * data out sequence, we decrement outstanding_r2ts. Once
1301  * outstanding_r2ts reaches zero, go ahead and send the delayed
1302  * TASK_ABORTED status.
1303  */
1304  if (se_cmd->transport_state & CMD_T_ABORTED) {
1305  if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1306  if (--cmd->outstanding_r2ts < 1) {
1309  se_cmd, 1);
1310  }
1311 
1312  return iscsit_dump_data_payload(conn, payload_length, 1);
1313  }
1314  }
1315  /*
1316  * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
1317  * within-command recovery checks before receiving the payload.
1318  */
1319  ret = iscsit_check_pre_dataout(cmd, buf);
1320  if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)
1321  return 0;
1322  else if (ret == DATAOUT_CANNOT_RECOVER)
1323  return -1;
1324 
1325  rx_size += payload_length;
1326  iov = &cmd->iov_data[0];
1327 
1328  iov_ret = iscsit_map_iovec(cmd, iov, be32_to_cpu(hdr->offset),
1329  payload_length);
1330  if (iov_ret < 0)
1331  return -1;
1332 
1333  iov_count += iov_ret;
1334 
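/*
 * The "((-payload_length) & 3)" idiom below computes how many bytes are
 * needed to round the payload up to a 4-byte boundary, e.g. a 13-byte
 * payload yields padding of 3, while a 16-byte payload yields 0.
 */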
1335  padding = ((-payload_length) & 3);
1336  if (padding != 0) {
1337  iov[iov_count].iov_base = cmd->pad_bytes;
1338  iov[iov_count++].iov_len = padding;
1339  rx_size += padding;
1340  pr_debug("Receiving %u padding bytes.\n", padding);
1341  }
1342 
1343  if (conn->conn_ops->DataDigest) {
1344  iov[iov_count].iov_base = &checksum;
1345  iov[iov_count++].iov_len = ISCSI_CRC_LEN;
1346  rx_size += ISCSI_CRC_LEN;
1347  }
1348 
1349  rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
1350 
1351  iscsit_unmap_iovec(cmd);
1352 
1353  if (rx_got != rx_size)
1354  return -1;
1355 
1356  if (conn->conn_ops->DataDigest) {
1357  u32 data_crc;
1358 
1359  data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
1360  be32_to_cpu(hdr->offset),
1361  payload_length, padding,
1362  cmd->pad_bytes);
1363 
1364  if (checksum != data_crc) {
1365  pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
1366  " DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
1367  " does not match computed 0x%08x\n",
1368  hdr->itt, hdr->offset, payload_length,
1369  hdr->datasn, checksum, data_crc);
1370  data_crc_failed = 1;
1371  } else {
1372  pr_debug("Got CRC32C DataDigest 0x%08x for"
1373  " %u bytes of Data Out\n", checksum,
1374  payload_length);
1375  }
1376  }
1377  /*
1378  * Increment post receive data and CRC values or perform
1379  * within-command recovery.
1380  */
1381  ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed);
1382  if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY))
1383  return 0;
1384  else if (ret == DATAOUT_SEND_R2T) {
1386  iscsit_build_r2ts_for_cmd(cmd, conn, false);
1387  } else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
1388  /*
1389  * Handle extra special case for out of order
1390  * Unsolicited Data Out.
1391  */
1392  spin_lock_bh(&cmd->istate_lock);
1393  ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
1396  spin_unlock_bh(&cmd->istate_lock);
1397 
1399  if (ooo_cmdsn)
1400  return 0;
1401  target_execute_cmd(&cmd->se_cmd);
1402  return 0;
1403  } else /* DATAOUT_CANNOT_RECOVER */
1404  return -1;
1405 
1406  return 0;
1407 }
1408 
1409 static int iscsit_handle_nop_out(
1410  struct iscsi_conn *conn,
1411  unsigned char *buf)
1412 {
1413  unsigned char *ping_data = NULL;
1414  int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
1415  u32 checksum, data_crc, padding = 0, payload_length;
1416  struct iscsi_cmd *cmd = NULL;
1417  struct kvec *iov = NULL;
1418  struct iscsi_nopout *hdr;
1419 
1420  hdr = (struct iscsi_nopout *) buf;
1421  payload_length = ntoh24(hdr->dlength);
1422 
1423  if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1424  pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
1425  " not set, protocol error.\n");
1426  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1427  buf, conn);
1428  }
1429 
1430  if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1431  pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
1432  " greater than MaxXmitDataSegmentLength: %u, protocol"
1433  " error.\n", payload_length,
1434  conn->conn_ops->MaxXmitDataSegmentLength);
1435  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1436  buf, conn);
1437  }
1438 
1439  pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x,"
1440  " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
1441  hdr->itt == RESERVED_ITT ? "Response" : "Request",
1442  hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
1443  payload_length);
1444  /*
1445  * This is not a response to an Unsolicited NopIN, which means
1446  * it can either be a NOPOUT ping request (with a valid ITT),
1447  * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
1448  * Either way, make sure we allocate a struct iscsi_cmd, as both
1449  * can contain ping data.
1450  */
1451  if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1452  cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1453  if (!cmd)
1454  return iscsit_add_reject(
1456  1, buf, conn);
1457 
1458  cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
1459  cmd->i_state = ISTATE_SEND_NOPIN;
1460  cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1461  1 : 0);
1462  conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1463  cmd->targ_xfer_tag = 0xFFFFFFFF;
1464  cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1465  cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1466  cmd->data_direction = DMA_NONE;
1467  }
1468 
1469  if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1470  rx_size = payload_length;
1471  ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
1472  if (!ping_data) {
1473  pr_err("Unable to allocate memory for"
1474  " NOPOUT ping data.\n");
1475  ret = -1;
1476  goto out;
1477  }
1478 
1479  iov = &cmd->iov_misc[0];
1480  iov[niov].iov_base = ping_data;
1481  iov[niov++].iov_len = payload_length;
1482 
1483  padding = ((-payload_length) & 3);
1484  if (padding != 0) {
1485  pr_debug("Receiving %u additional bytes"
1486  " for padding.\n", padding);
1487  iov[niov].iov_base = &cmd->pad_bytes;
1488  iov[niov++].iov_len = padding;
1489  rx_size += padding;
1490  }
1491  if (conn->conn_ops->DataDigest) {
1492  iov[niov].iov_base = &checksum;
1493  iov[niov++].iov_len = ISCSI_CRC_LEN;
1494  rx_size += ISCSI_CRC_LEN;
1495  }
1496 
1497  rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
1498  if (rx_got != rx_size) {
1499  ret = -1;
1500  goto out;
1501  }
1502 
1503  if (conn->conn_ops->DataDigest) {
1504  iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
1505  ping_data, payload_length,
1506  padding, cmd->pad_bytes,
1507  (u8 *)&data_crc);
1508 
1509  if (checksum != data_crc) {
1510  pr_err("Ping data CRC32C DataDigest"
1511  " 0x%08x does not match computed 0x%08x\n",
1512  checksum, data_crc);
1513  if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1514  pr_err("Unable to recover from"
1515  " NOPOUT Ping DataCRC failure while in"
1516  " ERL=0.\n");
1517  ret = -1;
1518  goto out;
1519  } else {
1520  /*
1521  * Silently drop this PDU and let the
1522  * initiator plug the CmdSN gap.
1523  */
1524  pr_debug("Dropping NOPOUT"
1525  " Command CmdSN: 0x%08x due to"
1526  " DataCRC error.\n", hdr->cmdsn);
1527  ret = 0;
1528  goto out;
1529  }
1530  } else {
1531  pr_debug("Got CRC32C DataDigest"
1532  " 0x%08x for %u bytes of ping data.\n",
1533  checksum, payload_length);
1534  }
1535  }
1536 
1537  ping_data[payload_length] = '\0';
1538  /*
1539  * Attach ping data to struct iscsi_cmd->buf_ptr.
1540  */
1541  cmd->buf_ptr = ping_data;
1543 
1544  pr_debug("Got %u bytes of NOPOUT ping"
1545  " data.\n", payload_length);
1546  pr_debug("Ping Data: \"%s\"\n", ping_data);
1547  }
1548 
1549  if (hdr->itt != RESERVED_ITT) {
1550  if (!cmd) {
1551  pr_err("Checking CmdSN for NOPOUT,"
1552  " but cmd is NULL!\n");
1553  return -1;
1554  }
1555  /*
1556  * Initiator is expecting a NopIN ping reply,
1557  */
1558  spin_lock_bh(&conn->cmd_lock);
1559  list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1560  spin_unlock_bh(&conn->cmd_lock);
1561 
1562  iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1563 
1564  if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1565  iscsit_add_cmd_to_response_queue(cmd, conn,
1566  cmd->i_state);
1567  return 0;
1568  }
1569 
1570  cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1571  if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1572  ret = 0;
1573  goto ping_out;
1574  }
1575  if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1578  1, 0, buf, cmd);
1579 
1580  return 0;
1581  }
1582 
1583  if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
1584  /*
1585  * This was a response to an unsolicited NOPIN ping.
1586  */
1587  cmd = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
1588  if (!cmd)
1589  return -1;
1590 
1592 
1593  cmd->i_state = ISTATE_REMOVE;
1594  iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
1596  } else {
1597  /*
1598  * Initiator is not expecting a NOPIN in response.
1599  * Just ignore for now.
1600  *
1601  * iSCSI v19-91 10.18
1602  * "A NOP-OUT may also be used to confirm a changed
1603  * ExpStatSN if another PDU will not be available
1604  * for a long time."
1605  */
1606  ret = 0;
1607  goto out;
1608  }
1609 
1610  return 0;
1611 out:
1612  if (cmd)
1613  iscsit_release_cmd(cmd);
1614 ping_out:
1615  kfree(ping_data);
1616  return ret;
1617 }
1618 
1619 static int iscsit_handle_task_mgt_cmd(
1620  struct iscsi_conn *conn,
1621  unsigned char *buf)
1622 {
1623  struct iscsi_cmd *cmd;
1624  struct se_tmr_req *se_tmr;
1625  struct iscsi_tmr_req *tmr_req;
1626  struct iscsi_tm *hdr;
1627  int out_of_order_cmdsn = 0;
1628  int ret;
1629  u8 function;
1630 
1631  hdr = (struct iscsi_tm *) buf;
1632  hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
1633  function = hdr->flags;
1634 
1635  pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
1636  " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
1637  " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
1638  hdr->rtt, hdr->refcmdsn, conn->cid);
1639 
1640  if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1641  ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
1642  hdr->rtt != RESERVED_ITT)) {
1643  pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
1644  hdr->rtt = RESERVED_ITT;
1645  }
1646 
1647  if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
1648  !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1649  pr_err("Task Management Request TASK_REASSIGN not"
1650  " issued as immediate command, bad iSCSI Initiator"
1651  " implementation\n");
1652  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1653  buf, conn);
1654  }
1655  if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1658 
1659  cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1660  if (!cmd)
1661  return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1662  1, buf, conn);
1663 
1664  cmd->data_direction = DMA_NONE;
1665 
1666  cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
1667  if (!cmd->tmr_req) {
1668  pr_err("Unable to allocate memory for"
1669  " Task Management command!\n");
1670  return iscsit_add_reject_from_cmd(
1671  ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1672  1, 1, buf, cmd);
1673  }
1674 
1675  /*
1676  * TASK_REASSIGN for ERL=2 / connection stays inside of
1677  * LIO-Target $FABRIC_MOD
1678  */
1679  if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
1680 
1681  u8 tcm_function;
1682  int ret;
1683 
1684  transport_init_se_cmd(&cmd->se_cmd,
1685  &lio_target_fabric_configfs->tf_ops,
1686  conn->sess->se_sess, 0, DMA_NONE,
1687  MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
1688 
1689  switch (function) {
1690  case ISCSI_TM_FUNC_ABORT_TASK:
1691  tcm_function = TMR_ABORT_TASK;
1692  break;
1693  case ISCSI_TM_FUNC_ABORT_TASK_SET:
1694  tcm_function = TMR_ABORT_TASK_SET;
1695  break;
1696  case ISCSI_TM_FUNC_CLEAR_ACA:
1697  tcm_function = TMR_CLEAR_ACA;
1698  break;
1699  case ISCSI_TM_FUNC_CLEAR_TASK_SET:
1700  tcm_function = TMR_CLEAR_TASK_SET;
1701  break;
1702  case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
1703  tcm_function = TMR_LUN_RESET;
1704  break;
1705  case ISCSI_TM_FUNC_TARGET_WARM_RESET:
1706  tcm_function = TMR_TARGET_WARM_RESET;
1707  break;
1708  case ISCSI_TM_FUNC_TARGET_COLD_RESET:
1709  tcm_function = TMR_TARGET_COLD_RESET;
1710  break;
1711  default:
1712  pr_err("Unknown iSCSI TMR Function:"
1713  " 0x%02x\n", function);
1716  1, 1, buf, cmd);
1717  }
1718 
1719  ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
1720  tcm_function, GFP_KERNEL);
1721  if (ret < 0)
1724  1, 1, buf, cmd);
1725 
1726  cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
1727  }
1728 
1729  cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
1730  cmd->i_state = ISTATE_SEND_TASKMGTRSP;
1731  cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1732  cmd->init_task_tag = hdr->itt;
1733  cmd->targ_xfer_tag = 0xFFFFFFFF;
1734  cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1735  cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1736  se_tmr = cmd->se_cmd.se_tmr_req;
1737  tmr_req = cmd->tmr_req;
1738  /*
1739  * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
1740  */
1741  if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
1742  ret = transport_lookup_tmr_lun(&cmd->se_cmd,
1743  scsilun_to_int(&hdr->lun));
1744  if (ret < 0) {
1745  cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1746  se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
1747  goto attach;
1748  }
1749  }
1750 
1751  switch (function) {
1753  se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
1754  if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
1755  cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1756  goto attach;
1757  }
1758  break;
1763  break;
1765  if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
1766  cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1768  goto attach;
1769  }
1770  break;
1772  if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
1773  cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1775  goto attach;
1776  }
1777  break;
1779  se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
1780  /*
1781  * Perform sanity checks on the ExpDataSN only if the
1782  * TASK_REASSIGN was successful.
1783  */
1784  if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
1785  break;
1786 
1787  if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
1790  buf, cmd);
1791  break;
1792  default:
1793  pr_err("Unknown TMR function: 0x%02x, protocol"
1794  " error.\n", function);
1795  cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1797  goto attach;
1798  }
1799 
1800  if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
1801  (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
1802  se_tmr->call_transport = 1;
1803 attach:
1804  spin_lock_bh(&conn->cmd_lock);
1805  list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1806  spin_unlock_bh(&conn->cmd_lock);
1807 
1808  if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1809  int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1810  if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
1811  out_of_order_cmdsn = 1;
1812  else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1813  return 0;
1814  else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1817  1, 0, buf, cmd);
1818  }
1819  iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1820 
1821  if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
1822  return 0;
1823  /*
1824  * Found the referenced task, send to transport for processing.
1825  */
1826  if (se_tmr->call_transport)
1827  return transport_generic_handle_tmr(&cmd->se_cmd);
1828 
1829  /*
1830  * Could not find the referenced LUN, task, or Task Management
1831  * command not authorized or supported. Change state and
1832  * let the tx_thread send the response.
1833  *
1834  * For connection recovery, this is also the default action for
1835  * TMR TASK_REASSIGN.
1836  */
1837  iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
1838  return 0;
1839 }
1840 
1841 /* #warning FIXME: Support Text Command parameters besides SendTargets */
1842 static int iscsit_handle_text_cmd(
1843  struct iscsi_conn *conn,
1844  unsigned char *buf)
1845 {
1846  char *text_ptr, *text_in;
1847  int cmdsn_ret, niov = 0, rx_got, rx_size;
1848  u32 checksum = 0, data_crc = 0, payload_length;
1849  u32 padding = 0, pad_bytes = 0, text_length = 0;
1850  struct iscsi_cmd *cmd;
1851  struct kvec iov[3];
1852  struct iscsi_text *hdr;
1853 
1854  hdr = (struct iscsi_text *) buf;
1855  payload_length = ntoh24(hdr->dlength);
1856 
1857  if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1858  pr_err("Unable to accept text parameter length: %u"
1859  "greater than MaxXmitDataSegmentLength %u.\n",
1860  payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
1861  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1862  buf, conn);
1863  }
1864 
1865  pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
1866  " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
1867  hdr->exp_statsn, payload_length);
1868 
1869  rx_size = text_length = payload_length;
1870  if (text_length) {
1871  text_in = kzalloc(text_length, GFP_KERNEL);
1872  if (!text_in) {
1873  pr_err("Unable to allocate memory for"
1874  " incoming text parameters\n");
1875  return -1;
1876  }
1877 
1878  memset(iov, 0, 3 * sizeof(struct kvec));
1879  iov[niov].iov_base = text_in;
1880  iov[niov++].iov_len = text_length;
1881 
1882  padding = ((-payload_length) & 3);
1883  if (padding != 0) {
1884  iov[niov].iov_base = &pad_bytes;
1885  iov[niov++].iov_len = padding;
1886  rx_size += padding;
1887  pr_debug("Receiving %u additional bytes"
1888  " for padding.\n", padding);
1889  }
1890  if (conn->conn_ops->DataDigest) {
1891  iov[niov].iov_base = &checksum;
1892  iov[niov++].iov_len = ISCSI_CRC_LEN;
1893  rx_size += ISCSI_CRC_LEN;
1894  }
1895 
1896  rx_got = rx_data(conn, &iov[0], niov, rx_size);
1897  if (rx_got != rx_size) {
1898  kfree(text_in);
1899  return -1;
1900  }
1901 
1902  if (conn->conn_ops->DataDigest) {
1903  iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
1904  text_in, text_length,
1905  padding, (u8 *)&pad_bytes,
1906  (u8 *)&data_crc);
1907 
1908  if (checksum != data_crc) {
1909  pr_err("Text data CRC32C DataDigest"
1910  " 0x%08x does not match computed"
1911  " 0x%08x\n", checksum, data_crc);
1912  if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1913  pr_err("Unable to recover from"
1914  " Text Data digest failure while in"
1915  " ERL=0.\n");
1916  kfree(text_in);
1917  return -1;
1918  } else {
1919  /*
1920  * Silently drop this PDU and let the
1921  * initiator plug the CmdSN gap.
1922  */
1923  pr_debug("Dropping Text"
1924  " Command CmdSN: 0x%08x due to"
1925  " DataCRC error.\n", hdr->cmdsn);
1926  kfree(text_in);
1927  return 0;
1928  }
1929  } else {
1930  pr_debug("Got CRC32C DataDigest"
1931  " 0x%08x for %u bytes of text data.\n",
1932  checksum, text_length);
1933  }
1934  }
1935  text_in[text_length - 1] = '\0';
1936  pr_debug("Successfully read %d bytes of text"
1937  " data.\n", text_length);
1938 
1939  if (strncmp("SendTargets", text_in, 11) != 0) {
1940  pr_err("Received Text Data that is not"
1941  " SendTargets, cannot continue.\n");
1942  kfree(text_in);
1943  return -1;
1944  }
1945  text_ptr = strchr(text_in, '=');
1946  if (!text_ptr) {
1947  pr_err("No \"=\" separator found in Text Data,"
1948  " cannot continue.\n");
1949  kfree(text_in);
1950  return -1;
1951  }
1952  if (strncmp("=All", text_ptr, 4) != 0) {
1953  pr_err("Unable to locate All value for"
1954  " SendTargets key, cannot continue.\n");
1955  kfree(text_in);
1956  return -1;
1957  }
1958 /*#warning Support SendTargets=(iSCSI Target Name/Nothing) values. */
1959  kfree(text_in);
1960  }
1961 
1962  cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1963  if (!cmd)
1964  return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1965  1, buf, conn);
1966 
1967  cmd->iscsi_opcode = ISCSI_OP_TEXT;
1969  cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1970  conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1971  cmd->targ_xfer_tag = 0xFFFFFFFF;
1972  cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1973  cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1974  cmd->data_direction = DMA_NONE;
1975 
1976  spin_lock_bh(&conn->cmd_lock);
1977  list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1978  spin_unlock_bh(&conn->cmd_lock);
1979 
1980  iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1981 
1982  if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1983  cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1984  if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1987  1, 0, buf, cmd);
1988 
1989  return 0;
1990  }
1991 
1992  return iscsit_execute_cmd(cmd, 0);
1993 }
1994 
1995 int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1996 {
1997  struct iscsi_conn *conn_p;
1998  struct iscsi_session *sess = conn->sess;
1999 
2000  pr_debug("Received logout request CLOSESESSION on CID: %hu"
2001  " for SID: %u.\n", conn->cid, conn->sess->sid);
2002 
2003  atomic_set(&sess->session_logout, 1);
2004  atomic_set(&conn->conn_logout_remove, 1);
2006 
2009 
2010  spin_lock_bh(&sess->conn_lock);
2011  list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
2012  if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
2013  continue;
2014 
2015  pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2017  }
2018  spin_unlock_bh(&sess->conn_lock);
2019 
2020  iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2021 
2022  return 0;
2023 }
2024 
2025 int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2026 {
2027  struct iscsi_conn *l_conn;
2028  struct iscsi_session *sess = conn->sess;
2029 
2030  pr_debug("Received logout request CLOSECONNECTION for CID:"
2031  " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2032 
2033  /*
2034  * A Logout Request with a CLOSECONNECTION reason code for a CID
2035  * can arrive on a connection with a differing CID.
2036  */
2037  if (conn->cid == cmd->logout_cid) {
2038  spin_lock_bh(&conn->state_lock);
2039  pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2040  conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2041 
2042  atomic_set(&conn->conn_logout_remove, 1);
2043  conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
2044  iscsit_inc_conn_usage_count(conn);
2045 
2046  spin_unlock_bh(&conn->state_lock);
2047  } else {
2048  /*
2049  * Handle all different cid CLOSECONNECTION requests in
2050  * iscsit_logout_post_handler_diffcid() so as to give enough
2051  * time for any non-immediate command's CmdSN to be
2052  * acknowledged on the connection in question.
2053  *
2054  * Here we simply make sure the CID is still around.
2055  */
2056  l_conn = iscsit_get_conn_from_cid(sess,
2057  cmd->logout_cid);
2058  if (!l_conn) {
2059  cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2060  iscsit_add_cmd_to_response_queue(cmd, conn,
2061  cmd->i_state);
2062  return 0;
2063  }
2064 
2065  iscsit_dec_conn_usage_count(l_conn);
2066  }
2067 
2068  iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2069 
2070  return 0;
2071 }
2072 
2073 int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2074 {
2075  struct iscsi_session *sess = conn->sess;
2076 
2077  pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
2078  " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2079 
2080  if (sess->sess_ops->ErrorRecoveryLevel != 2) {
2081  pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2082  " while ERL!=2.\n");
2083  cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
2084  iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2085  return 0;
2086  }
2087 
2088  if (conn->cid == cmd->logout_cid) {
2089  pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2090  " with CID: %hu on CID: %hu, implementation error.\n",
2091  cmd->logout_cid, conn->cid);
2092  cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
2093  iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2094  return 0;
2095  }
2096 
2097  iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2098 
2099  return 0;
2100 }
2101 
2102 static int iscsit_handle_logout_cmd(
2103  struct iscsi_conn *conn,
2104  unsigned char *buf)
2105 {
2106  int cmdsn_ret, logout_remove = 0;
2107  u8 reason_code = 0;
2108  struct iscsi_cmd *cmd;
2109  struct iscsi_logout *hdr;
2110  struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
2111 
2112  hdr = (struct iscsi_logout *) buf;
2113  reason_code = (hdr->flags & 0x7f);
2114 
2115  if (tiqn) {
2116  spin_lock(&tiqn->logout_stats.lock);
2117  if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
2118  tiqn->logout_stats.normal_logouts++;
2119  else
2120  tiqn->logout_stats.abnormal_logouts++;
2121  spin_unlock(&tiqn->logout_stats.lock);
2122  }
2123 
2124  pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
2125  " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
2126  hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
2127  hdr->cid, conn->cid);
2128 
2129  if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
2130  pr_err("Received logout request on connection that"
2131  " is not in logged in state, ignoring request.\n");
2132  return 0;
2133  }
2134 
2135  cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
2136  if (!cmd)
2137  return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
2138  buf, conn);
2139 
2140  cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
2141  cmd->i_state = ISTATE_SEND_LOGOUTRSP;
2142  cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2143  conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
2144  cmd->targ_xfer_tag = 0xFFFFFFFF;
2145  cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2146  cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2147  cmd->logout_cid = be16_to_cpu(hdr->cid);
2148  cmd->logout_reason = reason_code;
2149  cmd->data_direction = DMA_NONE;
2150 
2151  /*
2152  * We need to sleep in these cases (by returning 1) until the Logout
2153  * Response gets sent in the tx thread.
2154  */
2155  if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
2156  ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
2157  be16_to_cpu(hdr->cid) == conn->cid))
2158  logout_remove = 1;
2159 
2160  spin_lock_bh(&conn->cmd_lock);
2161  list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2162  spin_unlock_bh(&conn->cmd_lock);
2163 
2164  if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
2165  iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2166 
2167  /*
2168  * Immediate commands are executed, well, immediately.
2169  * Non-Immediate Logout Commands are executed in CmdSN order.
2170  */
2171  if (cmd->immediate_cmd) {
2172  int ret = iscsit_execute_cmd(cmd, 0);
2173 
2174  if (ret < 0)
2175  return ret;
2176  } else {
2177  cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
2178  if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2179  logout_remove = 0;
2180  } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2181  return iscsit_add_reject_from_cmd(
2182  ISCSI_REASON_PROTOCOL_ERROR,
2183  1, 0, buf, cmd);
2184  }
2185  }
2186 
2187  return logout_remove;
2188 }
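/*
 * A compact sketch of the reason codes accepted above (numeric values
 * per RFC 3720 / include/scsi/iscsi_proto.h):
 *
 *	0  CLOSESESSION          - serviced by iscsit_logout_closesession()
 *	                           above; logout_remove = 1
 *	1  CLOSECONNECTION       - serviced by iscsit_logout_closeconnection()
 *	                           above; logout_remove = 1 only when
 *	                           hdr->cid matches the receiving connection
 *	2  REMOVECONNFORRECOVERY - serviced by
 *	                           iscsit_logout_removeconnforrecovery()
 *	                           above; only valid with ErrorRecoveryLevel=2
 *
 * A positive return value is what makes the RX thread pause until the
 * TX thread has pushed the Logout Response, as the comment above notes.
 */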
2189 
2190 static int iscsit_handle_snack(
2191  struct iscsi_conn *conn,
2192  unsigned char *buf)
2193 {
2194  struct iscsi_snack *hdr;
2195 
2196  hdr = (struct iscsi_snack *) buf;
2197  hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2198 
2199  pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
2200  " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
2201  " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
2202  hdr->begrun, hdr->runlength, conn->cid);
2203 
2204  if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2205  pr_err("Initiator sent SNACK request while in"
2206  " ErrorRecoveryLevel=0.\n");
2207  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
2208  buf, conn);
2209  }
2210  /*
2211  * SNACK_DATA and SNACK_R2T are both 0, so check which function to
2212  * call from inside iscsi_send_recovery_datain_or_r2t().
2213  */
2214  switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
2215  case 0:
2216  return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2217  hdr->itt,
2218  be32_to_cpu(hdr->ttt),
2219  be32_to_cpu(hdr->begrun),
2220  be32_to_cpu(hdr->runlength));
2221  case ISCSI_FLAG_SNACK_TYPE_STATUS:
2222  return iscsit_handle_status_snack(conn, hdr->itt,
2223  be32_to_cpu(hdr->ttt),
2224  be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
2225  case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
2226  return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
2227  be32_to_cpu(hdr->begrun),
2228  be32_to_cpu(hdr->runlength));
2229  case ISCSI_FLAG_SNACK_TYPE_RDATA:
2230  /* FIXME: Support R-Data SNACK */
2231  pr_err("R-Data SNACK Not Supported.\n");
2232  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
2233  buf, conn);
2234  default:
2235  pr_err("Unknown SNACK type 0x%02x, protocol"
2236  " error.\n", hdr->flags & 0x0f);
2237  return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
2238  buf, conn);
2239  }
2240 
2241  return 0;
2242 }
2243 
2244 static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
2245 {
2246  if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2247  (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2248  wait_for_completion_interruptible_timeout(
2249  &conn->rx_half_close_comp,
2250  ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
2251  }
2252 }
2253 
2254 static int iscsit_handle_immediate_data(
2255  struct iscsi_cmd *cmd,
2256  unsigned char *buf,
2257  u32 length)
2258 {
2259  int iov_ret, rx_got = 0, rx_size = 0;
2260  u32 checksum, iov_count = 0, padding = 0;
2261  struct iscsi_conn *conn = cmd->conn;
2262  struct kvec *iov;
2263 
2264  iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
2265  if (iov_ret < 0)
2266  return IMMEDIATE_DATA_CANNOT_RECOVER;
2267 
2268  rx_size = length;
2269  iov_count = iov_ret;
2270  iov = &cmd->iov_data[0];
2271 
2272  padding = ((-length) & 3);
2273  if (padding != 0) {
2274  iov[iov_count].iov_base = cmd->pad_bytes;
2275  iov[iov_count++].iov_len = padding;
2276  rx_size += padding;
2277  }
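/*
 * Worked example of the "(-length) & 3" idiom used here and in the PDU
 * builders below: RFC 3720 pads data segments to a 4-byte boundary,
 * and for an unsigned length (-length) & 3 == (4 - (length % 4)) % 4,
 * e.g.
 *
 *	length 8192 -> padding 0
 *	length 8193 -> padding 3
 *	length 8194 -> padding 2
 *	length 8195 -> padding 1
 */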
2278 
2279  if (conn->conn_ops->DataDigest) {
2280  iov[iov_count].iov_base = &checksum;
2281  iov[iov_count++].iov_len = ISCSI_CRC_LEN;
2282  rx_size += ISCSI_CRC_LEN;
2283  }
2284 
2285  rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
2286 
2287  iscsit_unmap_iovec(cmd);
2288 
2289  if (rx_got != rx_size) {
2290  iscsit_rx_thread_wait_for_tcp(conn);
2291  return IMMEDIATE_DATA_CANNOT_RECOVER;
2292  }
2293 
2294  if (conn->conn_ops->DataDigest) {
2295  u32 data_crc;
2296 
2297  data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
2298  cmd->write_data_done, length, padding,
2299  cmd->pad_bytes);
2300 
2301  if (checksum != data_crc) {
2302  pr_err("ImmediateData CRC32C DataDigest 0x%08x"
2303  " does not match computed 0x%08x\n", checksum,
2304  data_crc);
2305 
2306  if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2307  pr_err("Unable to recover from"
2308  " Immediate Data digest failure while"
2309  " in ERL=0.\n");
2310  iscsit_add_reject_from_cmd(
2311  ISCSI_REASON_DATA_DIGEST_ERROR,
2312  1, 0, buf, cmd);
2313  return IMMEDIATE_DATA_CANNOT_RECOVER;
2314  } else {
2315  iscsit_add_reject_from_cmd(
2316  ISCSI_REASON_DATA_DIGEST_ERROR,
2317  0, 0, buf, cmd);
2318  return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
2319  }
2320  } else {
2321  pr_debug("Got CRC32C DataDigest 0x%08x for"
2322  " %u bytes of Immediate Data\n", checksum,
2323  length);
2324  }
2325  }
2326 
2327  cmd->write_data_done += length;
2328 
2329  if (cmd->write_data_done == cmd->se_cmd.data_length) {
2330  spin_lock_bh(&cmd->istate_lock);
2331  cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
2332  cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
2333  spin_unlock_bh(&cmd->istate_lock);
2334  }
2335 
2336  return IMMEDIATE_DATA_NORMAL_OPERATION;
2337 }
2338 
2339 /*
2340  * Called with sess->conn_lock held.
2341  */
2342 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections
2343  with active network interface */
2344 static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
2345 {
2346  struct iscsi_cmd *cmd;
2347  struct iscsi_conn *conn_p;
2348 
2349  /*
2350  * Only send an Asynchronous Message on connections whose network
2351  * interface is still functional.
2352  */
2353  list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
2354  if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
2355  iscsit_inc_conn_usage_count(conn_p);
2356  break;
2357  }
2358  }
2359 
2360  if (!conn_p)
2361  return;
2362 
2363  cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
2364  if (!cmd) {
2365  iscsit_dec_conn_usage_count(conn_p);
2366  return;
2367  }
2368 
2369  cmd->logout_cid = conn->cid;
2370  cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2371  cmd->i_state = ISTATE_SEND_ASYNCMSG;
2372 
2373  spin_lock_bh(&conn_p->cmd_lock);
2374  list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
2375  spin_unlock_bh(&conn_p->cmd_lock);
2376 
2377  iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
2378  iscsit_dec_conn_usage_count(conn_p);
2379 }
2380 
2381 static int iscsit_send_conn_drop_async_message(
2382  struct iscsi_cmd *cmd,
2383  struct iscsi_conn *conn)
2384 {
2385  struct iscsi_async *hdr;
2386 
2387  cmd->tx_size = ISCSI_HDR_LEN;
2388  cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2389 
2390  hdr = (struct iscsi_async *) cmd->pdu;
2391  hdr->opcode = ISCSI_OP_ASYNC_EVENT;
2392  hdr->flags = ISCSI_FLAG_CMD_FINAL;
2393  cmd->init_task_tag = RESERVED_ITT;
2394  cmd->targ_xfer_tag = 0xFFFFFFFF;
2395  put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
2396  cmd->stat_sn = conn->stat_sn++;
2397  hdr->statsn = cpu_to_be32(cmd->stat_sn);
2398  hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2399  hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2400  hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
2401  hdr->param1 = cpu_to_be16(cmd->logout_cid);
2402  hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
2403  hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
2404 
2405  if (conn->conn_ops->HeaderDigest) {
2406  u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2407 
2408  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2409  (unsigned char *)hdr, ISCSI_HDR_LEN,
2410  0, NULL, (u8 *)header_digest);
2411 
2412  cmd->tx_size += ISCSI_CRC_LEN;
2413  pr_debug("Attaching CRC32C HeaderDigest to"
2414  " Async Message 0x%08x\n", *header_digest);
2415  }
2416 
2417  cmd->iov_misc[0].iov_base = cmd->pdu;
2418  cmd->iov_misc[0].iov_len = cmd->tx_size;
2419  cmd->iov_misc_count = 1;
2420 
2421  pr_debug("Sending Connection Dropped Async Message StatSN:"
2422  " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
2423  cmd->logout_cid, conn->cid);
2424  return 0;
2425 }
2426 
2427 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
2428 {
2429  if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2430  (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2431  wait_for_completion_interruptible_timeout(
2432  &conn->tx_half_close_comp,
2433  ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
2434  }
2435 }
2436 
2437 static int iscsit_send_data_in(
2438  struct iscsi_cmd *cmd,
2439  struct iscsi_conn *conn)
2440 {
2441  int iov_ret = 0, set_statsn = 0;
2442  u32 iov_count = 0, tx_size = 0;
2443  struct iscsi_datain datain;
2444  struct iscsi_datain_req *dr;
2445  struct iscsi_data_rsp *hdr;
2446  struct kvec *iov;
2447  int eodr = 0;
2448  int ret;
2449 
2450  memset(&datain, 0, sizeof(struct iscsi_datain));
2451  dr = iscsit_get_datain_values(cmd, &datain);
2452  if (!dr) {
2453  pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
2454  cmd->init_task_tag);
2455  return -1;
2456  }
2457 
2458  /*
2459  * Be paranoid and double check the logic for now.
2460  */
2461  if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
2462  pr_err("Command ITT: 0x%08x, datain.offset: %u and"
2463  " datain.length: %u exceeds cmd->data_length: %u\n",
2464  cmd->init_task_tag, datain.offset, datain.length,
2465  cmd->se_cmd.data_length);
2466  return -1;
2467  }
2468 
2469  spin_lock_bh(&conn->sess->session_stats_lock);
2470  conn->sess->tx_data_octets += datain.length;
2471  if (conn->sess->se_sess->se_node_acl) {
2472  spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
2473  conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
2474  spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
2475  }
2476  spin_unlock_bh(&conn->sess->session_stats_lock);
2477  /*
2478  * Special case for successful execution w/ both DATAIN
2479  * and Sense Data.
2480  */
2481  if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
2482  (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
2483  datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
2484  else {
2485  if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
2486  (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
2487  iscsit_increment_maxcmdsn(cmd, conn->sess);
2488  cmd->stat_sn = conn->stat_sn++;
2489  set_statsn = 1;
2490  } else if (dr->dr_complete ==
2491  DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
2492  set_statsn = 1;
2493  }
2494 
2495  hdr = (struct iscsi_data_rsp *) cmd->pdu;
2496  memset(hdr, 0, ISCSI_HDR_LEN);
2497  hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
2498  hdr->flags = datain.flags;
2499  if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2500  if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2501  hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2502  hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2503  } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2504  hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2505  hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2506  }
2507  }
2508  hton24(hdr->dlength, datain.length);
2509  if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2510  int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2511  (struct scsi_lun *)&hdr->lun);
2512  else
2513  put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2514 
2515  hdr->itt = cmd->init_task_tag;
2516 
2517  if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2518  hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2519  else
2520  hdr->ttt = cpu_to_be32(0xFFFFFFFF);
2521  if (set_statsn)
2522  hdr->statsn = cpu_to_be32(cmd->stat_sn);
2523  else
2524  hdr->statsn = cpu_to_be32(0xFFFFFFFF);
2525 
2526  hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2527  hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2528  hdr->datasn = cpu_to_be32(datain.data_sn);
2529  hdr->offset = cpu_to_be32(datain.offset);
2530 
2531  iov = &cmd->iov_data[0];
2532  iov[iov_count].iov_base = cmd->pdu;
2533  iov[iov_count++].iov_len = ISCSI_HDR_LEN;
2534  tx_size += ISCSI_HDR_LEN;
2535 
2536  if (conn->conn_ops->HeaderDigest) {
2537  u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2538 
2539  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2540  (unsigned char *)hdr, ISCSI_HDR_LEN,
2541  0, NULL, (u8 *)header_digest);
2542 
2543  iov[0].iov_len += ISCSI_CRC_LEN;
2544  tx_size += ISCSI_CRC_LEN;
2545 
2546  pr_debug("Attaching CRC32 HeaderDigest"
2547  " for DataIN PDU 0x%08x\n", *header_digest);
2548  }
2549 
2550  iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length);
2551  if (iov_ret < 0)
2552  return -1;
2553 
2554  iov_count += iov_ret;
2555  tx_size += datain.length;
2556 
2557  cmd->padding = ((-datain.length) & 3);
2558  if (cmd->padding) {
2559  iov[iov_count].iov_base = cmd->pad_bytes;
2560  iov[iov_count++].iov_len = cmd->padding;
2561  tx_size += cmd->padding;
2562 
2563  pr_debug("Attaching %u padding bytes\n",
2564  cmd->padding);
2565  }
2566  if (conn->conn_ops->DataDigest) {
2567  cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
2568  datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
2569 
2570  iov[iov_count].iov_base = &cmd->data_crc;
2571  iov[iov_count++].iov_len = ISCSI_CRC_LEN;
2572  tx_size += ISCSI_CRC_LEN;
2573 
2574  pr_debug("Attached CRC32C DataDigest %d bytes, crc"
2575  " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
2576  }
2577 
2578  cmd->iov_data_count = iov_count;
2579  cmd->tx_size = tx_size;
2580 
2581  pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
2582  " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
2583  cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2584  ntohl(hdr->offset), datain.length, conn->cid);
2585 
2586  /* sendpage is preferred but can't insert markers */
2587  if (!conn->conn_ops->IFMarker)
2588  ret = iscsit_fe_sendpage_sg(cmd, conn);
2589  else
2590  ret = iscsit_send_tx_data(cmd, conn, 0);
2591 
2592  iscsit_unmap_iovec(cmd);
2593 
2594  if (ret < 0) {
2595  iscsit_tx_thread_wait_for_tcp(conn);
2596  return ret;
2597  }
2598 
2599  if (dr->dr_complete) {
2600  eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
2601  2 : 1;
2602  iscsit_free_datain_req(cmd, dr);
2603  }
2604 
2605  return eodr;
2606 }
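/*
 * Return-value sketch for the DataIN path above, matching how
 * handle_response_queue() below consumes it: 0 means more DataIN
 * sequences remain for this command, 1 means the command is finished
 * and no separate SCSI Response is queued here, and 2 means DataIN is
 * complete but a SCSI Response carrying sense data must still be sent.
 */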
2607 
2608 static int iscsit_send_logout_response(
2609  struct iscsi_cmd *cmd,
2610  struct iscsi_conn *conn)
2611 {
2612  int niov = 0, tx_size;
2613  struct iscsi_conn *logout_conn = NULL;
2614  struct iscsi_conn_recovery *cr = NULL;
2615  struct iscsi_session *sess = conn->sess;
2616  struct kvec *iov;
2617  struct iscsi_logout_rsp *hdr;
2618  /*
2619  * The actual shutting down of Sessions and/or Connections
2620  * for CLOSESESSION and CLOSECONNECTION Logout Requests
2621  * is done in iscsit_logout_post_handler().
2622  */
2623  switch (cmd->logout_reason) {
2624  case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
2625  pr_debug("iSCSI session logout successful, setting"
2626  " logout response to ISCSI_LOGOUT_SUCCESS.\n");
2627  cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2628  break;
2629  case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
2630  if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
2631  break;
2632  /*
2633  * For CLOSECONNECTION logout requests carrying
2634  * a matching logout CID -> local CID, the reference
2635  * for the local CID will have been incremented in
2636  * iscsi_logout_closeconnection().
2637  *
2638  * For CLOSECONNECTION logout requests carrying
2639  * a different CID than the connection it arrived
2640  * on, the connection responding to cmd->logout_cid
2641  * is stopped in iscsit_logout_post_handler_diffcid().
2642  */
2643 
2644  pr_debug("iSCSI CID: %hu logout on CID: %hu"
2645  " successful.\n", cmd->logout_cid, conn->cid);
2646  cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2647  break;
2648  case ISCSI_LOGOUT_REASON_RECOVERY:
2649  if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
2650  (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
2651  break;
2652  /*
2653  * If the connection is still active from our point of view
2654  * force connection recovery to occur.
2655  */
2656  logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
2657  cmd->logout_cid);
2658  if (logout_conn) {
2659  iscsit_connection_reinstatement_rcfr(logout_conn);
2660  iscsit_dec_conn_usage_count(logout_conn);
2661  }
2662 
2663  cr = iscsit_get_inactive_connection_recovery_entry(
2664  conn->sess, cmd->logout_cid);
2665  if (!cr) {
2666  pr_err("Unable to locate CID: %hu for"
2667  " REMOVECONNFORRECOVERY Logout Request.\n",
2668  cmd->logout_cid);
2669  cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2670  break;
2671  }
2672 
2673  iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
2674 
2675  pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
2676  " for recovery for CID: %hu on CID: %hu successful.\n",
2677  cmd->logout_cid, conn->cid);
2678  cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2679  break;
2680  default:
2681  pr_err("Unknown cmd->logout_reason: 0x%02x\n",
2682  cmd->logout_reason);
2683  return -1;
2684  }
2685 
2686  tx_size = ISCSI_HDR_LEN;
2687  hdr = (struct iscsi_logout_rsp *)cmd->pdu;
2688  memset(hdr, 0, ISCSI_HDR_LEN);
2689  hdr->opcode = ISCSI_OP_LOGOUT_RSP;
2690  hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2691  hdr->response = cmd->logout_response;
2692  hdr->itt = cmd->init_task_tag;
2693  cmd->stat_sn = conn->stat_sn++;
2694  hdr->statsn = cpu_to_be32(cmd->stat_sn);
2695 
2696  iscsit_increment_maxcmdsn(cmd, conn->sess);
2697  hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2698  hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2699 
2700  iov = &cmd->iov_misc[0];
2701  iov[niov].iov_base = cmd->pdu;
2702  iov[niov++].iov_len = ISCSI_HDR_LEN;
2703 
2704  if (conn->conn_ops->HeaderDigest) {
2705  u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2706 
2707  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2708  (unsigned char *)hdr, ISCSI_HDR_LEN,
2709  0, NULL, (u8 *)header_digest);
2710 
2711  iov[0].iov_len += ISCSI_CRC_LEN;
2712  tx_size += ISCSI_CRC_LEN;
2713  pr_debug("Attaching CRC32C HeaderDigest to"
2714  " Logout Response 0x%08x\n", *header_digest);
2715  }
2716  cmd->iov_misc_count = niov;
2717  cmd->tx_size = tx_size;
2718 
2719  pr_debug("Sending Logout Response ITT: 0x%08x StatSN:"
2720  " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
2721  cmd->init_task_tag, cmd->stat_sn, hdr->response,
2722  cmd->logout_cid, conn->cid);
2723 
2724  return 0;
2725 }
2726 
2727 /*
2728  * Unsolicited NOPIN, either requesting a response or not.
2729  */
2730 static int iscsit_send_unsolicited_nopin(
2731  struct iscsi_cmd *cmd,
2732  struct iscsi_conn *conn,
2733  int want_response)
2734 {
2735  int tx_size = ISCSI_HDR_LEN;
2736  struct iscsi_nopin *hdr;
2737  int ret;
2738 
2739  hdr = (struct iscsi_nopin *) cmd->pdu;
2740  memset(hdr, 0, ISCSI_HDR_LEN);
2741  hdr->opcode = ISCSI_OP_NOOP_IN;
2742  hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2743  hdr->itt = cmd->init_task_tag;
2744  hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2745  cmd->stat_sn = conn->stat_sn;
2746  hdr->statsn = cpu_to_be32(cmd->stat_sn);
2747  hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2748  hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2749 
2750  if (conn->conn_ops->HeaderDigest) {
2751  u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2752 
2753  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2754  (unsigned char *)hdr, ISCSI_HDR_LEN,
2755  0, NULL, (u8 *)header_digest);
2756 
2757  tx_size += ISCSI_CRC_LEN;
2758  pr_debug("Attaching CRC32C HeaderDigest to"
2759  " NopIN 0x%08x\n", *header_digest);
2760  }
2761 
2762  cmd->iov_misc[0].iov_base = cmd->pdu;
2763  cmd->iov_misc[0].iov_len = tx_size;
2764  cmd->iov_misc_count = 1;
2765  cmd->tx_size = tx_size;
2766 
2767  pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
2768  " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
2769 
2770  ret = iscsit_send_tx_data(cmd, conn, 1);
2771  if (ret < 0) {
2772  iscsit_tx_thread_wait_for_tcp(conn);
2773  return ret;
2774  }
2775 
2776  spin_lock_bh(&cmd->istate_lock);
2777  cmd->i_state = want_response ?
2778  ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
2779  spin_unlock_bh(&cmd->istate_lock);
2780 
2781  return 0;
2782 }
2783 
2784 static int iscsit_send_nopin_response(
2785  struct iscsi_cmd *cmd,
2786  struct iscsi_conn *conn)
2787 {
2788  int niov = 0, tx_size;
2789  u32 padding = 0;
2790  struct kvec *iov;
2791  struct iscsi_nopin *hdr;
2792 
2793  tx_size = ISCSI_HDR_LEN;
2794  hdr = (struct iscsi_nopin *) cmd->pdu;
2795  memset(hdr, 0, ISCSI_HDR_LEN);
2796  hdr->opcode = ISCSI_OP_NOOP_IN;
2797  hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2798  hton24(hdr->dlength, cmd->buf_ptr_size);
2799  put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2800  hdr->itt = cmd->init_task_tag;
2801  hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2802  cmd->stat_sn = conn->stat_sn++;
2803  hdr->statsn = cpu_to_be32(cmd->stat_sn);
2804 
2805  iscsit_increment_maxcmdsn(cmd, conn->sess);
2806  hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2807  hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2808 
2809  iov = &cmd->iov_misc[0];
2810  iov[niov].iov_base = cmd->pdu;
2811  iov[niov++].iov_len = ISCSI_HDR_LEN;
2812 
2813  if (conn->conn_ops->HeaderDigest) {
2814  u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2815 
2816  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2817  (unsigned char *)hdr, ISCSI_HDR_LEN,
2818  0, NULL, (u8 *)header_digest);
2819 
2820  iov[0].iov_len += ISCSI_CRC_LEN;
2821  tx_size += ISCSI_CRC_LEN;
2822  pr_debug("Attaching CRC32C HeaderDigest"
2823  " to NopIn 0x%08x\n", *header_digest);
2824  }
2825 
2826  /*
2827  * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
2828  * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
2829  */
2830  if (cmd->buf_ptr_size) {
2831  iov[niov].iov_base = cmd->buf_ptr;
2832  iov[niov++].iov_len = cmd->buf_ptr_size;
2833  tx_size += cmd->buf_ptr_size;
2834 
2835  pr_debug("Echoing back %u bytes of ping"
2836  " data.\n", cmd->buf_ptr_size);
2837 
2838  padding = ((-cmd->buf_ptr_size) & 3);
2839  if (padding != 0) {
2840  iov[niov].iov_base = &cmd->pad_bytes;
2841  iov[niov++].iov_len = padding;
2842  tx_size += padding;
2843  pr_debug("Attaching %u additional"
2844  " padding bytes.\n", padding);
2845  }
2846  if (conn->conn_ops->DataDigest) {
2847  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2848  cmd->buf_ptr, cmd->buf_ptr_size,
2849  padding, (u8 *)&cmd->pad_bytes,
2850  (u8 *)&cmd->data_crc);
2851 
2852  iov[niov].iov_base = &cmd->data_crc;
2853  iov[niov++].iov_len = ISCSI_CRC_LEN;
2854  tx_size += ISCSI_CRC_LEN;
2855  pr_debug("Attached DataDigest for %u"
2856  " bytes of ping data, CRC 0x%08x\n",
2857  cmd->buf_ptr_size, cmd->data_crc);
2858  }
2859  }
2860 
2861  cmd->iov_misc_count = niov;
2862  cmd->tx_size = tx_size;
2863 
2864  pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:"
2865  " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag,
2866  cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
2867 
2868  return 0;
2869 }
2870 
2871 static int iscsit_send_r2t(
2872  struct iscsi_cmd *cmd,
2873  struct iscsi_conn *conn)
2874 {
2875  int tx_size = 0;
2876  struct iscsi_r2t *r2t;
2877  struct iscsi_r2t_rsp *hdr;
2878  int ret;
2879 
2880  r2t = iscsit_get_r2t_from_list(cmd);
2881  if (!r2t)
2882  return -1;
2883 
2884  hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
2885  memset(hdr, 0, ISCSI_HDR_LEN);
2886  hdr->opcode = ISCSI_OP_R2T;
2887  hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2888  int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2889  (struct scsi_lun *)&hdr->lun);
2890  hdr->itt = cmd->init_task_tag;
2891  spin_lock_bh(&conn->sess->ttt_lock);
2892  r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
2893  if (r2t->targ_xfer_tag == 0xFFFFFFFF)
2894  r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
2895  spin_unlock_bh(&conn->sess->ttt_lock);
2896  hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
2897  hdr->statsn = cpu_to_be32(conn->stat_sn);
2898  hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2899  hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
2900  hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
2901  hdr->data_offset = cpu_to_be32(r2t->offset);
2902  hdr->data_length = cpu_to_be32(r2t->xfer_len);
2903 
2904  cmd->iov_misc[0].iov_base = cmd->pdu;
2905  cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
2906  tx_size += ISCSI_HDR_LEN;
2907 
2908  if (conn->conn_ops->HeaderDigest) {
2909  u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2910 
2911  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
2912  (unsigned char *)hdr, ISCSI_HDR_LEN,
2913  0, NULL, (u8 *)header_digest);
2914 
2915  cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
2916  tx_size += ISCSI_CRC_LEN;
2917  pr_debug("Attaching CRC32 HeaderDigest for R2T"
2918  " PDU 0x%08x\n", *header_digest);
2919  }
2920 
2921  pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
2922  " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
2923  (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
2924  r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
2925  r2t->offset, r2t->xfer_len, conn->cid);
2926 
2927  cmd->iov_misc_count = 1;
2928  cmd->tx_size = tx_size;
2929 
2930  spin_lock_bh(&cmd->r2t_lock);
2931  r2t->sent_r2t = 1;
2932  spin_unlock_bh(&cmd->r2t_lock);
2933 
2934  ret = iscsit_send_tx_data(cmd, conn, 1);
2935  if (ret < 0) {
2936  iscsit_tx_thread_wait_for_tcp(conn);
2937  return ret;
2938  }
2939 
2940  spin_lock_bh(&cmd->dataout_timeout_lock);
2941  iscsit_start_dataout_timer(cmd, conn);
2942  spin_unlock_bh(&cmd->dataout_timeout_lock);
2943 
2944  return 0;
2945 }
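/*
 * A small sketch of the target transfer tag allocation done above; the
 * helper name is hypothetical, and the only assumption is that
 * 0xFFFFFFFF is the reserved TTT value from RFC 3720, so the
 * per-session counter simply skips it when it wraps.
 */
static inline u32 example_next_ttt(struct iscsi_session *sess)
{
	u32 ttt;

	spin_lock_bh(&sess->ttt_lock);
	ttt = sess->targ_xfer_tag++;
	if (ttt == 0xFFFFFFFF)
		ttt = sess->targ_xfer_tag++;
	spin_unlock_bh(&sess->ttt_lock);

	return ttt;
}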
2946 
2947 /*
2948  * @recovery: If called from iscsi_task_reassign_complete_write() for
2949  * connection recovery.
2950  */
2951 int iscsit_build_r2ts_for_cmd(
2952  struct iscsi_cmd *cmd,
2953  struct iscsi_conn *conn,
2954  bool recovery)
2955 {
2956  int first_r2t = 1;
2957  u32 offset = 0, xfer_len = 0;
2958 
2959  spin_lock_bh(&cmd->r2t_lock);
2960  if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
2961  spin_unlock_bh(&cmd->r2t_lock);
2962  return 0;
2963  }
2964 
2965  if (conn->sess->sess_ops->DataSequenceInOrder &&
2966  !recovery)
2967  cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
2968 
2969  while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
2970  if (conn->sess->sess_ops->DataSequenceInOrder) {
2971  offset = cmd->r2t_offset;
2972 
2973  if (first_r2t && recovery) {
2974  int new_data_end = offset +
2975  conn->sess->sess_ops->MaxBurstLength -
2976  cmd->next_burst_len;
2977 
2978  if (new_data_end > cmd->se_cmd.data_length)
2979  xfer_len = cmd->se_cmd.data_length - offset;
2980  else
2981  xfer_len =
2982  conn->sess->sess_ops->MaxBurstLength -
2983  cmd->next_burst_len;
2984  } else {
2985  int new_data_end = offset +
2986  conn->sess->sess_ops->MaxBurstLength;
2987 
2988  if (new_data_end > cmd->se_cmd.data_length)
2989  xfer_len = cmd->se_cmd.data_length - offset;
2990  else
2991  xfer_len = conn->sess->sess_ops->MaxBurstLength;
2992  }
2993  cmd->r2t_offset += xfer_len;
2994 
2995  if (cmd->r2t_offset == cmd->se_cmd.data_length)
2996  cmd->cmd_flags |= ICF_SENT_LAST_R2T;
2997  } else {
2998  struct iscsi_seq *seq;
2999 
3000  seq = iscsit_get_seq_holder_for_r2t(cmd);
3001  if (!seq) {
3002  spin_unlock_bh(&cmd->r2t_lock);
3003  return -1;
3004  }
3005 
3006  offset = seq->offset;
3007  xfer_len = seq->xfer_len;
3008 
3009  if (cmd->seq_send_order == cmd->seq_count)
3010  cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3011  }
3012  cmd->outstanding_r2ts++;
3013  first_r2t = 0;
3014 
3015  if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
3016  spin_unlock_bh(&cmd->r2t_lock);
3017  return -1;
3018  }
3019 
3020  if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
3021  break;
3022  }
3023  spin_unlock_bh(&cmd->r2t_lock);
3024 
3025  return 0;
3026 }
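/*
 * Worked example for the DataSequenceInOrder path above, assuming
 * MaxBurstLength=262144, MaxOutstandingR2T=4 and a 1048576-byte WRITE
 * with no data received yet: the loop emits four R2Ts before
 * ICF_SENT_LAST_R2T is set,
 *
 *	R2T 0: offset      0, xfer_len 262144
 *	R2T 1: offset 262144, xfer_len 262144
 *	R2T 2: offset 524288, xfer_len 262144
 *	R2T 3: offset 786432, xfer_len 262144
 *
 * With MaxOutstandingR2T=1 the same command would instead be offered
 * one R2T at a time, the next one being built once the outstanding
 * burst of DataOUT has been received.
 */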
3027 
3028 static int iscsit_send_status(
3029  struct iscsi_cmd *cmd,
3030  struct iscsi_conn *conn)
3031 {
3032  u8 iov_count = 0, recovery;
3033  u32 padding = 0, tx_size = 0;
3034  struct iscsi_scsi_rsp *hdr;
3035  struct kvec *iov;
3036 
3037  recovery = (cmd->i_state != ISTATE_SEND_STATUS);
3038  if (!recovery)
3039  cmd->stat_sn = conn->stat_sn++;
3040 
3041  spin_lock_bh(&conn->sess->session_stats_lock);
3042  conn->sess->rsp_pdus++;
3043  spin_unlock_bh(&conn->sess->session_stats_lock);
3044 
3045  hdr = (struct iscsi_scsi_rsp *) cmd->pdu;
3046  memset(hdr, 0, ISCSI_HDR_LEN);
3047  hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
3048  hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3049  if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
3050  hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
3051  hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3052  } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
3053  hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
3054  hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3055  }
3056  hdr->response = cmd->iscsi_response;
3057  hdr->cmd_status = cmd->se_cmd.scsi_status;
3058  hdr->itt = cmd->init_task_tag;
3059  hdr->statsn = cpu_to_be32(cmd->stat_sn);
3060 
3061  iscsit_increment_maxcmdsn(cmd, conn->sess);
3062  hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3063  hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3064 
3065  iov = &cmd->iov_misc[0];
3066  iov[iov_count].iov_base = cmd->pdu;
3067  iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3068  tx_size += ISCSI_HDR_LEN;
3069 
3070  /*
3071  * Attach SENSE DATA payload to iSCSI Response PDU
3072  */
3073  if (cmd->se_cmd.sense_buffer &&
3074  ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
3075  (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
3076  put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
3077  cmd->se_cmd.scsi_sense_length += sizeof (__be16);
3078 
3079  padding = -(cmd->se_cmd.scsi_sense_length) & 3;
3080  hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
3081  iov[iov_count].iov_base = cmd->sense_buffer;
3082  iov[iov_count++].iov_len =
3083  (cmd->se_cmd.scsi_sense_length + padding);
3084  tx_size += cmd->se_cmd.scsi_sense_length;
3085 
3086  if (padding) {
3087  memset(cmd->sense_buffer +
3088  cmd->se_cmd.scsi_sense_length, 0, padding);
3089  tx_size += padding;
3090  pr_debug("Adding %u bytes of padding to"
3091  " SENSE.\n", padding);
3092  }
3093 
3094  if (conn->conn_ops->DataDigest) {
3095  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3096  cmd->sense_buffer,
3097  (cmd->se_cmd.scsi_sense_length + padding),
3098  0, NULL, (u8 *)&cmd->data_crc);
3099 
3100  iov[iov_count].iov_base = &cmd->data_crc;
3101  iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3102  tx_size += ISCSI_CRC_LEN;
3103 
3104  pr_debug("Attaching CRC32 DataDigest for"
3105  " SENSE, %u bytes CRC 0x%08x\n",
3106  (cmd->se_cmd.scsi_sense_length + padding),
3107  cmd->data_crc);
3108  }
3109 
3110  pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
3111  " Response PDU\n",
3112  cmd->se_cmd.scsi_sense_length);
3113  }
3114 
3115  if (conn->conn_ops->HeaderDigest) {
3116  u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3117 
3118  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3119  (unsigned char *)hdr, ISCSI_HDR_LEN,
3120  0, NULL, (u8 *)header_digest);
3121 
3122  iov[0].iov_len += ISCSI_CRC_LEN;
3123  tx_size += ISCSI_CRC_LEN;
3124  pr_debug("Attaching CRC32 HeaderDigest for Response"
3125  " PDU 0x%08x\n", *header_digest);
3126  }
3127 
3128  cmd->iov_misc_count = iov_count;
3129  cmd->tx_size = tx_size;
3130 
3131  pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3132  " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
3133  (!recovery) ? "" : "Recovery ", cmd->init_task_tag,
3134  cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid);
3135 
3136  return 0;
3137 }
3138 
3139 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3140 {
3141  switch (se_tmr->response) {
3142  case TMR_FUNCTION_COMPLETE:
3143  return ISCSI_TMF_RSP_COMPLETE;
3144  case TMR_TASK_DOES_NOT_EXIST:
3145  return ISCSI_TMF_RSP_NO_TASK;
3146  case TMR_LUN_DOES_NOT_EXIST:
3147  return ISCSI_TMF_RSP_NO_LUN;
3148  case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3149  return ISCSI_TMF_RSP_NOT_SUPPORTED;
3150  case TMR_FUNCTION_AUTHORIZATION_FAILED:
3151  return ISCSI_TMF_RSP_AUTH_FAILED;
3152  case TMR_FUNCTION_REJECTED:
3153  default:
3154  return ISCSI_TMF_RSP_REJECTED;
3155  }
3156 }
3157 
3158 static int iscsit_send_task_mgt_rsp(
3159  struct iscsi_cmd *cmd,
3160  struct iscsi_conn *conn)
3161 {
3162  struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
3163  struct iscsi_tm_rsp *hdr;
3164  u32 tx_size = 0;
3165 
3166  hdr = (struct iscsi_tm_rsp *) cmd->pdu;
3167  memset(hdr, 0, ISCSI_HDR_LEN);
3168  hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
3169  hdr->flags = ISCSI_FLAG_CMD_FINAL;
3170  hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
3171  hdr->itt = cmd->init_task_tag;
3172  cmd->stat_sn = conn->stat_sn++;
3173  hdr->statsn = cpu_to_be32(cmd->stat_sn);
3174 
3175  iscsit_increment_maxcmdsn(cmd, conn->sess);
3176  hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3177  hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3178 
3179  cmd->iov_misc[0].iov_base = cmd->pdu;
3180  cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
3181  tx_size += ISCSI_HDR_LEN;
3182 
3183  if (conn->conn_ops->HeaderDigest) {
3184  u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3185 
3186  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3187  (unsigned char *)hdr, ISCSI_HDR_LEN,
3188  0, NULL, (u8 *)header_digest);
3189 
3190  cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3191  tx_size += ISCSI_CRC_LEN;
3192  pr_debug("Attaching CRC32 HeaderDigest for Task"
3193  " Mgmt Response PDU 0x%08x\n", *header_digest);
3194  }
3195 
3196  cmd->iov_misc_count = 1;
3197  cmd->tx_size = tx_size;
3198 
3199  pr_debug("Built Task Management Response ITT: 0x%08x,"
3200  " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3201  cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3202 
3203  return 0;
3204 }
3205 
3206 static bool iscsit_check_inaddr_any(struct iscsi_np *np)
3207 {
3208  bool ret = false;
3209 
3210  if (np->np_sockaddr.ss_family == AF_INET6) {
3211  const struct sockaddr_in6 sin6 = {
3212  .sin6_addr = IN6ADDR_ANY_INIT };
3213  struct sockaddr_in6 *sock_in6 =
3214  (struct sockaddr_in6 *)&np->np_sockaddr;
3215 
3216  if (!memcmp(sock_in6->sin6_addr.s6_addr,
3217  sin6.sin6_addr.s6_addr, 16))
3218  ret = true;
3219  } else {
3220  struct sockaddr_in * sock_in =
3221  (struct sockaddr_in *)&np->np_sockaddr;
3222 
3223  if (sock_in->sin_addr.s_addr == htonl(INADDR_ANY))
3224  ret = true;
3225  }
3226 
3227  return ret;
3228 }
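/*
 * A shorter equivalent sketch of the IPv6 branch above, assuming
 * <net/ipv6.h> is available: ipv6_addr_any() performs the same
 * all-zeros test as the 16-byte memcmp() against IN6ADDR_ANY_INIT.
 *
 *	if (np->np_sockaddr.ss_family == AF_INET6)
 *		ret = ipv6_addr_any(&((struct sockaddr_in6 *)
 *					&np->np_sockaddr)->sin6_addr);
 */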
3229 
3230 #define SENDTARGETS_BUF_LIMIT 32768U
3231 
3232 static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3233 {
3234  char *payload = NULL;
3235  struct iscsi_conn *conn = cmd->conn;
3236  struct iscsi_portal_group *tpg;
3237  struct iscsi_tiqn *tiqn;
3238  struct iscsi_tpg_np *tpg_np;
3239  int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3240  unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
3241 
3242  buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
3243  SENDTARGETS_BUF_LIMIT);
3244 
3245  payload = kzalloc(buffer_len, GFP_KERNEL);
3246  if (!payload) {
3247  pr_err("Unable to allocate memory for sendtargets"
3248  " response.\n");
3249  return -ENOMEM;
3250  }
3251 
3252  spin_lock(&tiqn_lock);
3253  list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
3254  len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
3255  len += 1;
3256 
3257  if ((len + payload_len) > buffer_len) {
3258  end_of_buf = 1;
3259  goto eob;
3260  }
3261  memcpy(payload + payload_len, buf, len);
3262  payload_len += len;
3263 
3264  spin_lock(&tiqn->tiqn_tpg_lock);
3265  list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3266 
3267  spin_lock(&tpg->tpg_state_lock);
3268  if ((tpg->tpg_state == TPG_STATE_FREE) ||
3269  (tpg->tpg_state == TPG_STATE_INACTIVE)) {
3270  spin_unlock(&tpg->tpg_state_lock);
3271  continue;
3272  }
3273  spin_unlock(&tpg->tpg_state_lock);
3274 
3275  spin_lock(&tpg->tpg_np_lock);
3276  list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3277  tpg_np_list) {
3278  struct iscsi_np *np = tpg_np->tpg_np;
3279  bool inaddr_any = iscsit_check_inaddr_any(np);
3280 
3281  len = sprintf(buf, "TargetAddress="
3282  "%s%s%s:%hu,%hu",
3283  (np->np_sockaddr.ss_family == AF_INET6) ?
3284  "[" : "", (inaddr_any == false) ?
3285  np->np_ip : conn->local_ip,
3286  (np->np_sockaddr.ss_family == AF_INET6) ?
3287  "]" : "", (inaddr_any == false) ?
3288  np->np_port : conn->local_port,
3289  tpg->tpgt);
3290  len += 1;
3291 
3292  if ((len + payload_len) > buffer_len) {
3293  spin_unlock(&tpg->tpg_np_lock);
3294  spin_unlock(&tiqn->tiqn_tpg_lock);
3295  end_of_buf = 1;
3296  goto eob;
3297  }
3298  memcpy(payload + payload_len, buf, len);
3299  payload_len += len;
3300  }
3301  spin_unlock(&tpg->tpg_np_lock);
3302  }
3303  spin_unlock(&tiqn->tiqn_tpg_lock);
3304 eob:
3305  if (end_of_buf)
3306  break;
3307  }
3308  spin_unlock(&tiqn_lock);
3309 
3310  cmd->buf_ptr = payload;
3311 
3312  return payload_len;
3313 }
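/*
 * A sketch of the payload produced above, using hypothetical names and
 * addresses: each key is NUL-terminated, and TargetAddress follows the
 * RFC 3720 "address:port,tpgt" form with IPv6 addresses bracketed.
 *
 *	TargetName=iqn.2003-01.org.linux-iscsi.host.x8664:sn.abcd\0
 *	TargetAddress=192.168.1.10:3260,1\0
 *	TargetAddress=[fe80::1]:3260,1\0
 */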
3314 
3315 /*
3316  * FIXME: Add support for F_BIT and C_BIT when the length is longer than
3317  * MaxRecvDataSegmentLength.
3318  */
3319 static int iscsit_send_text_rsp(
3320  struct iscsi_cmd *cmd,
3321  struct iscsi_conn *conn)
3322 {
3323  struct iscsi_text_rsp *hdr;
3324  struct kvec *iov;
3325  u32 padding = 0, tx_size = 0;
3326  int text_length, iov_count = 0;
3327 
3328  text_length = iscsit_build_sendtargets_response(cmd);
3329  if (text_length < 0)
3330  return text_length;
3331 
3332  padding = ((-text_length) & 3);
3333  if (padding != 0) {
3334  memset(cmd->buf_ptr + text_length, 0, padding);
3335  pr_debug("Attaching %u additional bytes for"
3336  " padding.\n", padding);
3337  }
3338 
3339  hdr = (struct iscsi_text_rsp *) cmd->pdu;
3340  memset(hdr, 0, ISCSI_HDR_LEN);
3341  hdr->opcode = ISCSI_OP_TEXT_RSP;
3342  hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3343  hton24(hdr->dlength, text_length);
3344  hdr->itt = cmd->init_task_tag;
3345  hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3346  cmd->stat_sn = conn->stat_sn++;
3347  hdr->statsn = cpu_to_be32(cmd->stat_sn);
3348 
3349  iscsit_increment_maxcmdsn(cmd, conn->sess);
3350  hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3351  hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3352 
3353  iov = &cmd->iov_misc[0];
3354 
3355  iov[iov_count].iov_base = cmd->pdu;
3356  iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3357  iov[iov_count].iov_base = cmd->buf_ptr;
3358  iov[iov_count++].iov_len = text_length + padding;
3359 
3360  tx_size += (ISCSI_HDR_LEN + text_length + padding);
3361 
3362  if (conn->conn_ops->HeaderDigest) {
3363  u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3364 
3365  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3366  (unsigned char *)hdr, ISCSI_HDR_LEN,
3367  0, NULL, (u8 *)header_digest);
3368 
3369  iov[0].iov_len += ISCSI_CRC_LEN;
3370  tx_size += ISCSI_CRC_LEN;
3371  pr_debug("Attaching CRC32 HeaderDigest for"
3372  " Text Response PDU 0x%08x\n", *header_digest);
3373  }
3374 
3375  if (conn->conn_ops->DataDigest) {
3376  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3377  cmd->buf_ptr, (text_length + padding),
3378  0, NULL, (u8 *)&cmd->data_crc);
3379 
3380  iov[iov_count].iov_base = &cmd->data_crc;
3381  iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3382  tx_size += ISCSI_CRC_LEN;
3383 
3384  pr_debug("Attaching DataDigest for %u bytes of text"
3385  " data, CRC 0x%08x\n", (text_length + padding),
3386  cmd->data_crc);
3387  }
3388 
3389  cmd->iov_misc_count = iov_count;
3390  cmd->tx_size = tx_size;
3391 
3392  pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
3393  " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
3394  text_length, conn->cid);
3395  return 0;
3396 }
3397 
3398 static int iscsit_send_reject(
3399  struct iscsi_cmd *cmd,
3400  struct iscsi_conn *conn)
3401 {
3402  u32 iov_count = 0, tx_size = 0;
3403  struct iscsi_reject *hdr;
3404  struct kvec *iov;
3405 
3406  hdr = (struct iscsi_reject *) cmd->pdu;
3407  hdr->opcode = ISCSI_OP_REJECT;
3408  hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3409  hton24(hdr->dlength, ISCSI_HDR_LEN);
3410  hdr->ffffffff = cpu_to_be32(0xffffffff);
3411  cmd->stat_sn = conn->stat_sn++;
3412  hdr->statsn = cpu_to_be32(cmd->stat_sn);
3413  hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3414  hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3415 
3416  iov = &cmd->iov_misc[0];
3417 
3418  iov[iov_count].iov_base = cmd->pdu;
3419  iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3420  iov[iov_count].iov_base = cmd->buf_ptr;
3421  iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3422 
3423  tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
3424 
3425  if (conn->conn_ops->HeaderDigest) {
3426  u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3427 
3428  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3429  (unsigned char *)hdr, ISCSI_HDR_LEN,
3430  0, NULL, (u8 *)header_digest);
3431 
3432  iov[0].iov_len += ISCSI_CRC_LEN;
3433  tx_size += ISCSI_CRC_LEN;
3434  pr_debug("Attaching CRC32 HeaderDigest for"
3435  " REJECT PDU 0x%08x\n", *header_digest);
3436  }
3437 
3438  if (conn->conn_ops->DataDigest) {
3439  iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3440  (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
3441  0, NULL, (u8 *)&cmd->data_crc);
3442 
3443  iov[iov_count].iov_base = &cmd->data_crc;
3444  iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3445  tx_size += ISCSI_CRC_LEN;
3446  pr_debug("Attaching CRC32 DataDigest for REJECT"
3447  " PDU 0x%08x\n", cmd->data_crc);
3448  }
3449 
3450  cmd->iov_misc_count = iov_count;
3451  cmd->tx_size = tx_size;
3452 
3453  pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
3454  " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
3455 
3456  return 0;
3457 }
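/*
 * Size sketch for the Reject PDU built above: its data segment is the
 * 48-byte header of the offending PDU (stashed in cmd->buf_ptr when
 * the reject was queued), so dlength and the second iovec are both
 * ISCSI_HDR_LEN and tx_size starts at twice ISCSI_HDR_LEN:
 *
 *	| 48-byte Reject header | 48-byte copy of the rejected header |
 */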
3458 
3459 void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
3460 {
3461  struct iscsi_thread_set *ts = conn->thread_set;
3462  int ord, cpu;
3463  /*
3464  * thread_id is assigned from iscsit_global->ts_bitmap from
3465  * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
3466  *
3467  * Here we use thread_id to determine which CPU that this
3468  * iSCSI connection's iscsi_thread_set will be scheduled to
3469  * execute upon.
3470  */
3471  ord = ts->thread_id % cpumask_weight(cpu_online_mask);
3472  for_each_online_cpu(cpu) {
3473  if (ord-- == 0) {
3474  cpumask_set_cpu(cpu, conn->conn_cpumask);
3475  return;
3476  }
3477  }
3478  /*
3479  * This should never be reached..
3480  */
3481  dump_stack();
3482  cpumask_setall(conn->conn_cpumask);
3483 }
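/*
 * Worked example of the mapping above: with four online CPUs and
 * ts->thread_id == 5, ord = 5 % 4 = 1, so the loop picks the second
 * online CPU; both the RX and TX kthread of this connection are then
 * pinned there by iscsit_thread_check_cpumask() below.
 */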
3484 
3485 static inline void iscsit_thread_check_cpumask(
3486  struct iscsi_conn *conn,
3487  struct task_struct *p,
3488  int mode)
3489 {
3490  char buf[128];
3491  /*
3492  * mode == 1 signals iscsi_target_tx_thread() usage.
3493  * mode == 0 signals iscsi_target_rx_thread() usage.
3494  */
3495  if (mode == 1) {
3496  if (!conn->conn_tx_reset_cpumask)
3497  return;
3498  conn->conn_tx_reset_cpumask = 0;
3499  } else {
3500  if (!conn->conn_rx_reset_cpumask)
3501  return;
3502  conn->conn_rx_reset_cpumask = 0;
3503  }
3504  /*
3505  * Update the CPU mask for this single kthread so that
3506  * both TX and RX kthreads are scheduled to run on the
3507  * same CPU.
3508  */
3509  memset(buf, 0, 128);
3510  cpumask_scnprintf(buf, 128, conn->conn_cpumask);
3511  set_cpus_allowed_ptr(p, conn->conn_cpumask);
3512 }
3513 
3514 static int handle_immediate_queue(struct iscsi_conn *conn)
3515 {
3516  struct iscsi_queue_req *qr;
3517  struct iscsi_cmd *cmd;
3518  u8 state;
3519  int ret;
3520 
3521  while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
3522  atomic_set(&conn->check_immediate_queue, 0);
3523  cmd = qr->cmd;
3524  state = qr->state;
3525  kmem_cache_free(lio_qr_cache, qr);
3526 
3527  switch (state) {
3528  case ISTATE_SEND_R2T:
3529  ret = iscsit_send_r2t(cmd, conn);
3530  if (ret < 0)
3531  goto err;
3532  break;
3533  case ISTATE_REMOVE:
3534  if (cmd->data_direction == DMA_TO_DEVICE)
3535  iscsit_stop_dataout_timer(cmd);
3536 
3537  spin_lock_bh(&conn->cmd_lock);
3538  list_del(&cmd->i_conn_node);
3539  spin_unlock_bh(&conn->cmd_lock);
3540 
3541  iscsit_free_cmd(cmd);
3542  continue;
3543  case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3544  iscsit_mod_nopin_response_timer(conn);
3545  ret = iscsit_send_unsolicited_nopin(cmd,
3546  conn, 1);
3547  if (ret < 0)
3548  goto err;
3549  break;
3550  case ISTATE_SEND_NOPIN_NO_RESPONSE:
3551  ret = iscsit_send_unsolicited_nopin(cmd,
3552  conn, 0);
3553  if (ret < 0)
3554  goto err;
3555  break;
3556  default:
3557  pr_err("Unknown Opcode: 0x%02x ITT:"
3558  " 0x%08x, i_state: %d on CID: %hu\n",
3559  cmd->iscsi_opcode, cmd->init_task_tag, state,
3560  conn->cid);
3561  goto err;
3562  }
3563  }
3564 
3565  return 0;
3566 
3567 err:
3568  return -1;
3569 }
3570 
3571 static int handle_response_queue(struct iscsi_conn *conn)
3572 {
3573  struct iscsi_queue_req *qr;
3574  struct iscsi_cmd *cmd;
3575  u8 state;
3576  int ret;
3577 
3578  while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
3579  cmd = qr->cmd;
3580  state = qr->state;
3581  kmem_cache_free(lio_qr_cache, qr);
3582 
3583 check_rsp_state:
3584  switch (state) {
3585  case ISTATE_SEND_DATAIN:
3586  ret = iscsit_send_data_in(cmd, conn);
3587  if (ret < 0)
3588  goto err;
3589  else if (!ret)
3590  /* more drs */
3591  goto check_rsp_state;
3592  else if (ret == 1) {
3593  /* all done */
3594  spin_lock_bh(&cmd->istate_lock);
3595  cmd->i_state = ISTATE_SENT_STATUS;
3596  spin_unlock_bh(&cmd->istate_lock);
3597  continue;
3598  } else if (ret == 2) {
3599  /* Still must send status,
3600  SCF_TRANSPORT_TASK_SENSE was set */
3601  spin_lock_bh(&cmd->istate_lock);
3602  cmd->i_state = ISTATE_SEND_STATUS;
3603  spin_unlock_bh(&cmd->istate_lock);
3604  state = ISTATE_SEND_STATUS;
3605  goto check_rsp_state;
3606  }
3607 
3608  break;
3609  case ISTATE_SEND_STATUS:
3610  case ISTATE_SEND_STATUS_RECOVERY:
3611  ret = iscsit_send_status(cmd, conn);
3612  break;
3613  case ISTATE_SEND_LOGOUTRSP:
3614  ret = iscsit_send_logout_response(cmd, conn);
3615  break;
3616  case ISTATE_SEND_ASYNCMSG:
3617  ret = iscsit_send_conn_drop_async_message(
3618  cmd, conn);
3619  break;
3620  case ISTATE_SEND_NOPIN:
3621  ret = iscsit_send_nopin_response(cmd, conn);
3622  break;
3623  case ISTATE_SEND_REJECT:
3624  ret = iscsit_send_reject(cmd, conn);
3625  break;
3626  case ISTATE_SEND_TASKMGTRSP:
3627  ret = iscsit_send_task_mgt_rsp(cmd, conn);
3628  if (ret != 0)
3629  break;
3630  ret = iscsit_tmr_post_handler(cmd, conn);
3631  if (ret != 0)
3632  iscsit_fall_back_to_erl0(conn->sess);
3633  break;
3634  case ISTATE_SEND_TEXTRSP:
3635  ret = iscsit_send_text_rsp(cmd, conn);
3636  break;
3637  default:
3638  pr_err("Unknown Opcode: 0x%02x ITT:"
3639  " 0x%08x, i_state: %d on CID: %hu\n",
3640  cmd->iscsi_opcode, cmd->init_task_tag,
3641  state, conn->cid);
3642  goto err;
3643  }
3644  if (ret < 0)
3645  goto err;
3646 
3647  if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
3648  iscsit_tx_thread_wait_for_tcp(conn);
3649  iscsit_unmap_iovec(cmd);
3650  goto err;
3651  }
3652  iscsit_unmap_iovec(cmd);
3653 
3654  switch (state) {
3655  case ISTATE_SEND_LOGOUTRSP:
3656  if (!iscsit_logout_post_handler(cmd, conn))
3657  goto restart;
3658  /* fall through */
3659  case ISTATE_SEND_STATUS:
3660  case ISTATE_SEND_ASYNCMSG:
3661  case ISTATE_SEND_NOPIN:
3662  case ISTATE_SEND_STATUS_RECOVERY:
3663  case ISTATE_SEND_TEXTRSP:
3664  case ISTATE_SEND_TASKMGTRSP:
3665  spin_lock_bh(&cmd->istate_lock);
3666  cmd->i_state = ISTATE_SENT_STATUS;
3667  spin_unlock_bh(&cmd->istate_lock);
3668  break;
3669  case ISTATE_SEND_REJECT:
3670  if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
3671  cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
3672  complete(&cmd->reject_comp);
3673  goto err;
3674  }
3675  complete(&cmd->reject_comp);
3676  break;
3677  default:
3678  pr_err("Unknown Opcode: 0x%02x ITT:"
3679  " 0x%08x, i_state: %d on CID: %hu\n",
3680  cmd->iscsi_opcode, cmd->init_task_tag,
3681  cmd->i_state, conn->cid);
3682  goto err;
3683  }
3684 
3685  if (atomic_read(&conn->check_immediate_queue))
3686  break;
3687  }
3688 
3689  return 0;
3690 
3691 err:
3692  return -1;
3693 restart:
3694  return -EAGAIN;
3695 }
3696 
3697 int iscsi_target_tx_thread(void *arg)
3698 {
3699  int ret = 0;
3700  struct iscsi_conn *conn;
3701  struct iscsi_thread_set *ts = arg;
3702  /*
3703  * Allow ourselves to be interrupted by SIGINT so that a
3704  * connection recovery / failure event can be triggered externally.
3705  */
3706  allow_signal(SIGINT);
3707 
3708 restart:
3709  conn = iscsi_tx_thread_pre_handler(ts);
3710  if (!conn)
3711  goto out;
3712 
3713  ret = 0;
3714 
3715  while (!kthread_should_stop()) {
3716  /*
3717  * Ensure that both TX and RX per connection kthreads
3718  * are scheduled to run on the same CPU.
3719  */
3720  iscsit_thread_check_cpumask(conn, current, 1);
3721 
3721 
3722  wait_event_interruptible(conn->queues_wq,
3723  !iscsit_conn_all_queues_empty(conn) ||
3724  ts->status == ISCSI_THREAD_SET_RESET);
3725 
3726  if ((ts->status == ISCSI_THREAD_SET_RESET) ||
3727  signal_pending(current))
3728  goto transport_err;
3729 
3730  ret = handle_immediate_queue(conn);
3731  if (ret < 0)
3732  goto transport_err;
3733 
3734  ret = handle_response_queue(conn);
3735  if (ret == -EAGAIN)
3736  goto restart;
3737  else if (ret < 0)
3738  goto transport_err;
3739  }
3740 
3741 transport_err:
3742  iscsit_take_action_for_connection_exit(conn);
3743  goto restart;
3744 out:
3745  return 0;
3746 }
3747 
3748 int iscsi_target_rx_thread(void *arg)
3749 {
3750  int ret;
3751  u8 buffer[ISCSI_HDR_LEN], opcode;
3752  u32 checksum = 0, digest = 0;
3753  struct iscsi_conn *conn = NULL;
3754  struct iscsi_thread_set *ts = arg;
3755  struct kvec iov;
3756  /*
3757  * Allow ourselves to be interrupted by SIGINT so that a
3758  * connection recovery / failure event can be triggered externally.
3759  */
3760  allow_signal(SIGINT);
3761 
3762 restart:
3763  conn = iscsi_rx_thread_pre_handler(ts);
3764  if (!conn)
3765  goto out;
3766 
3767  while (!kthread_should_stop()) {
3768  /*
3769  * Ensure that both TX and RX per connection kthreads
3770  * are scheduled to run on the same CPU.
3771  */
3772  iscsit_thread_check_cpumask(conn, current, 0);
3773 
3774  memset(buffer, 0, ISCSI_HDR_LEN);
3775  memset(&iov, 0, sizeof(struct kvec));
3776 
3777  iov.iov_base = buffer;
3778  iov.iov_len = ISCSI_HDR_LEN;
3779 
3780  ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
3781  if (ret != ISCSI_HDR_LEN) {
3782  iscsit_rx_thread_wait_for_tcp(conn);
3783  goto transport_err;
3784  }
3785 
3786  /*
3787  * Set conn->bad_hdr for use with REJECT PDUs.
3788  */
3789  memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN);
3790 
3791  if (conn->conn_ops->HeaderDigest) {
3792  iov.iov_base = &digest;
3793  iov.iov_len = ISCSI_CRC_LEN;
3794 
3795  ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
3796  if (ret != ISCSI_CRC_LEN) {
3797  iscsit_rx_thread_wait_for_tcp(conn);
3798  goto transport_err;
3799  }
3800 
3801  iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
3802  buffer, ISCSI_HDR_LEN,
3803  0, NULL, (u8 *)&checksum);
3804 
3805  if (digest != checksum) {
3806  pr_err("HeaderDigest CRC32C failed,"
3807  " received 0x%08x, computed 0x%08x\n",
3808  digest, checksum);
3809  /*
3810  * Set the PDU to 0xff so it will intentionally
3811  * hit default in the switch below.
3812  */
3813  memset(buffer, 0xff, ISCSI_HDR_LEN);
3814  spin_lock_bh(&conn->sess->session_stats_lock);
3815  conn->sess->conn_digest_errors++;
3816  spin_unlock_bh(&conn->sess->session_stats_lock);
3817  } else {
3818  pr_debug("Got HeaderDigest CRC32C"
3819  " 0x%08x\n", checksum);
3820  }
3821  }
3822 
3822 
3823  if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
3824  goto transport_err;
3825 
3826  opcode = buffer[0] & ISCSI_OPCODE_MASK;
3827 
3828  if (conn->sess->sess_ops->SessionType &&
3829  ((!(opcode & ISCSI_OP_TEXT)) ||
3830  (!(opcode & ISCSI_OP_LOGOUT)))) {
3831  pr_err("Received illegal iSCSI Opcode: 0x%02x"
3832  " while in Discovery Session, rejecting.\n", opcode);
3833  iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
3834  buffer, conn);
3835  goto transport_err;
3836  }
3837 
3838  switch (opcode) {
3839  case ISCSI_OP_SCSI_CMD:
3840  if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
3841  goto transport_err;
3842  break;
3843  case ISCSI_OP_SCSI_DATA_OUT:
3844  if (iscsit_handle_data_out(conn, buffer) < 0)
3845  goto transport_err;
3846  break;
3847  case ISCSI_OP_NOOP_OUT:
3848  if (iscsit_handle_nop_out(conn, buffer) < 0)
3849  goto transport_err;
3850  break;
3851  case ISCSI_OP_SCSI_TMFUNC:
3852  if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
3853  goto transport_err;
3854  break;
3855  case ISCSI_OP_TEXT:
3856  if (iscsit_handle_text_cmd(conn, buffer) < 0)
3857  goto transport_err;
3858  break;
3859  case ISCSI_OP_LOGOUT:
3860  ret = iscsit_handle_logout_cmd(conn, buffer);
3861  if (ret > 0) {
3862  wait_for_completion_timeout(&conn->conn_logout_comp,
3863  SECONDS_FOR_LOGOUT_COMP * HZ);
3864  goto transport_err;
3865  } else if (ret < 0)
3866  goto transport_err;
3867  break;
3868  case ISCSI_OP_SNACK:
3869  if (iscsit_handle_snack(conn, buffer) < 0)
3870  goto transport_err;
3871  break;
3872  default:
3873  pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
3874  opcode);
3875  if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
3876  pr_err("Cannot recover from unknown"
3877  " opcode while ERL=0, closing iSCSI connection"
3878  ".\n");
3879  goto transport_err;
3880  }
3881  if (!conn->conn_ops->OFMarker) {
3882  pr_err("Unable to recover from unknown"
3883  " opcode while OFMarker=No, closing iSCSI"
3884  " connection.\n");
3885  goto transport_err;
3886  }
3887  if (iscsit_recover_from_unknown_opcode(conn) < 0) {
3888  pr_err("Unable to recover from unknown"
3889  " opcode, closing iSCSI connection.\n");
3890  goto transport_err;
3891  }
3892  break;
3893  }
3894  }
3895 
3896 transport_err:
3897  if (!signal_pending(current))
3898  atomic_set(&conn->transport_failed, 1);
3899  iscsit_take_action_for_connection_exit(conn);
3900  goto restart;
3901 out:
3902  return 0;
3903 }
3904 
3905 static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
3906 {
3907  struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
3908  struct iscsi_session *sess = conn->sess;
3909  /*
3910  * We expect this function to only ever be called from either RX or TX
3911  * thread context via iscsit_close_connection() once the other context
3912  * has been reset -> returned sleeping pre-handler state.
3913  */
3914  spin_lock_bh(&conn->cmd_lock);
3915  list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
3916 
3917  list_del(&cmd->i_conn_node);
3918  spin_unlock_bh(&conn->cmd_lock);
3919 
3920  iscsit_increment_maxcmdsn(cmd, sess);
3921 
3922  iscsit_free_cmd(cmd);
3923 
3924  spin_lock_bh(&conn->cmd_lock);
3925  }
3926  spin_unlock_bh(&conn->cmd_lock);
3927 }
3928 
3929 static void iscsit_stop_timers_for_cmds(
3930  struct iscsi_conn *conn)
3931 {
3932  struct iscsi_cmd *cmd;
3933 
3934  spin_lock_bh(&conn->cmd_lock);
3935  list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
3936  if (cmd->data_direction == DMA_TO_DEVICE)
3937  iscsit_stop_dataout_timer(cmd);
3938  }
3939  spin_unlock_bh(&conn->cmd_lock);
3940 }
3941 
3942 int iscsit_close_connection(
3943  struct iscsi_conn *conn)
3944 {
3945  int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
3946  struct iscsi_session *sess = conn->sess;
3947 
3948  pr_debug("Closing iSCSI connection CID %hu on SID:"
3949  " %u\n", conn->cid, sess->sid);
3950  /*
3951  * Always up conn_logout_comp just in case the RX Thread is sleeping
3952  * and the logout response never got sent because the connection
3953  * failed.
3954  */
3955  complete(&conn->conn_logout_comp);
3956 
3957  iscsi_release_thread_set(conn);
3958 
3959  iscsit_stop_timers_for_cmds(conn);
3960  iscsit_stop_nopin_response_timer(conn);
3961  iscsit_stop_nopin_timer(conn);
3962  iscsit_free_queue_reqs_for_conn(conn);
3963 
3964  /*
3965  * During Connection recovery drop unacknowledged out of order
3966  * commands for this connection, and prepare the other commands
3967  * for realligence.
3968  *
3969  * During normal operation clear the out of order commands (but
3970  * do not free the struct iscsi_ooo_cmdsn's) and release all
3971  * struct iscsi_cmds.
3972  */
3973  if (atomic_read(&conn->connection_recovery)) {
3974  iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
3975  iscsit_prepare_cmds_for_realligance(conn);
3976  } else {
3977  iscsit_clear_ooo_cmdsns_for_conn(conn);
3978  iscsit_release_commands_from_conn(conn);
3979  }
3980 
3981  /*
3982  * Handle decrementing session or connection usage count if
3983  * a logout response was not able to be sent because the
3984  * connection failed. Fall back to Session Recovery here.
3985  */
3986  if (atomic_read(&conn->conn_logout_remove)) {
3987  if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
3988  iscsit_dec_conn_usage_count(conn);
3989  iscsit_dec_session_usage_count(sess);
3990  }
3991  if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
3992  iscsit_dec_conn_usage_count(conn);
3993 
3994  atomic_set(&conn->conn_logout_remove, 0);
3995  atomic_set(&sess->session_reinstatement, 0);
3996  atomic_set(&sess->session_fall_back_to_erl0, 1);
3997  }
3998 
3999  spin_lock_bh(&sess->conn_lock);
4000  list_del(&conn->conn_list);
4001 
4002  /*
4003  * Attempt to let the Initiator know this connection failed by
4004  * sending a Connection Dropped Async Message on another
4005  * active connection.
4006  */
4007  if (atomic_read(&conn->connection_recovery))
4008  iscsit_build_conn_drop_async_message(conn);
4009 
4010  spin_unlock_bh(&sess->conn_lock);
4011 
4012  /*
4013  * If connection reinstatement is being performed on this connection,
4014  * up the connection reinstatement semaphore that is being blocked on
4015  * in iscsit_cause_connection_reinstatement().
4016  */
4017  spin_lock_bh(&conn->state_lock);
4018  if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
4019  spin_unlock_bh(&conn->state_lock);
4020  complete(&conn->conn_wait_comp);
4021  wait_for_completion(&conn->conn_post_wait_comp);
4022  spin_lock_bh(&conn->state_lock);
4023  }
4024 
4025  /*
4026  * If connection reinstatement is being performed on this connection
4027  * by receiving a REMOVECONNFORRECOVERY logout request, up the
4028  * connection wait rcfr semaphore that is being blocked on
4029  * in iscsit_connection_reinstatement_rcfr().
4030  */
4031  if (atomic_read(&conn->connection_wait_rcfr)) {
4032  spin_unlock_bh(&conn->state_lock);
4033  complete(&conn->conn_wait_rcfr_comp);
4034  wait_for_completion(&conn->conn_post_wait_comp);
4035  spin_lock_bh(&conn->state_lock);
4036  }
4037  atomic_set(&conn->connection_reinstatement, 1);
4038  spin_unlock_bh(&conn->state_lock);
4039 
4040  /*
4041  * If any other processes are accessing this connection pointer we
4042  * must wait until they have completed.
4043  */
4044  iscsit_check_conn_usage_count(conn);
4045 
4046  if (conn->conn_rx_hash.tfm)
4047  crypto_free_hash(conn->conn_rx_hash.tfm);
4048  if (conn->conn_tx_hash.tfm)
4049  crypto_free_hash(conn->conn_tx_hash.tfm);
4050 
4051  if (conn->conn_cpumask)
4052  free_cpumask_var(conn->conn_cpumask);
4053 
4054  kfree(conn->conn_ops);
4055  conn->conn_ops = NULL;
4056 
4057  if (conn->sock)
4058  sock_release(conn->sock);
4059  conn->thread_set = NULL;
4060 
4061  pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
4062  conn->conn_state = TARG_CONN_STATE_FREE;
4063  kfree(conn);
4064 
4065  spin_lock_bh(&sess->conn_lock);
4066  atomic_dec(&sess->nconn);
4067  pr_debug("Decremented iSCSI connection count to %hu from node:"
4068  " %s\n", atomic_read(&sess->nconn),
4069  sess->sess_ops->InitiatorName);
4070  /*
4071  * Make sure that if one connection fails in a non-ERL=2 iSCSI
4072  * Session, they all fail.
4073  */
4074  if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
4075  !atomic_read(&sess->session_logout))
4076  atomic_set(&sess->session_fall_back_to_erl0, 1);
4077 
4078  /*
4079  * If this was not the last connection in the session, and we are
4080  * performing session reinstatement or falling back to ERL=0, call
4081  * iscsit_stop_session() without sleeping to shutdown the other
4082  * active connections.
4083  */
4084  if (atomic_read(&sess->nconn)) {
4085  if (!atomic_read(&sess->session_reinstatement) &&
4086  !atomic_read(&sess->session_fall_back_to_erl0)) {
4087  spin_unlock_bh(&sess->conn_lock);
4088  return 0;
4089  }
4090  if (!atomic_read(&sess->session_stop_active)) {
4091  atomic_set(&sess->session_stop_active, 1);
4092  spin_unlock_bh(&sess->conn_lock);
4093  iscsit_stop_session(sess, 0, 0);
4094  return 0;
4095  }
4096  spin_unlock_bh(&sess->conn_lock);
4097  return 0;
4098  }
4099 
4100  /*
4101  * If this was the last connection in the session and one of the
4102  * following is occurring:
4103  *
4104  * Session Reinstatement is not being performed and we are falling back
4105  * to ERL=0: call iscsit_close_session().
4106  *
4107  * Session Logout was requested. iscsit_close_session() will be called
4108  * elsewhere.
4109  *
4110  * Session Continuation is not being performed, start the Time2Retain
4111  * handler and check if sleep_on_sess_wait_comp is active.
4112  */
4113  if (!atomic_read(&sess->session_reinstatement) &&
4114  atomic_read(&sess->session_fall_back_to_erl0)) {
4115  spin_unlock_bh(&sess->conn_lock);
4116  target_put_session(sess->se_sess);
4117 
4118  return 0;
4119  } else if (atomic_read(&sess->session_logout)) {
4120  pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4121  sess->session_state = TARG_SESS_STATE_FREE;
4122  spin_unlock_bh(&sess->conn_lock);
4123 
4124  if (atomic_read(&sess->sleep_on_sess_wait_comp))
4125  complete(&sess->session_wait_comp);
4126 
4127  return 0;
4128  } else {
4129  pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4130  sess->session_state = TARG_SESS_STATE_FAILED;
4131 
4132  if (!atomic_read(&sess->session_continuation)) {
4133  spin_unlock_bh(&sess->conn_lock);
4134  iscsit_start_time2retain_handler(sess);
4135  } else
4136  spin_unlock_bh(&sess->conn_lock);
4137 
4138  if (atomic_read(&sess->sleep_on_sess_wait_comp))
4139  complete(&sess->session_wait_comp);
4140 
4141  return 0;
4142  }
4143  spin_unlock_bh(&sess->conn_lock);
4144 
4145  return 0;
4146 }
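/*
 * Illustrative sketch (not part of the upstream iscsi_target.c): the
 * last-connection outcomes at the tail of iscsit_close_connection() above,
 * folded into one hypothetical helper. ERL=0 fallback drops the session
 * reference, a requested logout moves to TARG_SESS_STATE_FREE, and anything
 * else moves to TARG_SESS_STATE_FAILED so the Time2Retain handler can run.
 */
static inline int iscsit_last_conn_session_state(struct iscsi_session *sess)
{
	if (!atomic_read(&sess->session_reinstatement) &&
	     atomic_read(&sess->session_fall_back_to_erl0))
		return -1;			/* session reference is dropped */
	if (atomic_read(&sess->session_logout))
		return TARG_SESS_STATE_FREE;	/* logout path frees the session */
	return TARG_SESS_STATE_FAILED;		/* Time2Retain handler takes over */
}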
4147 
4148 int iscsit_close_session(struct iscsi_session *sess)
4149 {
4150  struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
4151  struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4152 
4153  if (atomic_read(&sess->nconn)) {
4154  pr_err("%d connection(s) still exist for iSCSI session"
4155  " to %s\n", atomic_read(&sess->nconn),
4156  sess->sess_ops->InitiatorName);
4157  BUG();
4158  }
4159 
4160  spin_lock_bh(&se_tpg->session_lock);
4161  atomic_set(&sess->session_logout, 1);
4162  atomic_set(&sess->session_reinstatement, 1);
4163  atomic_set(&sess->session_fall_back_to_erl0, 1);
4164  spin_unlock_bh(&se_tpg->session_lock);
4165 
4166  /*
4167  * transport_deregister_session_configfs() will clear the
4168  * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process context
4169  * can set it again with __transport_register_session() in
4170  * iscsi_post_login_handler() after the iscsit_stop_session()
4171  * completes in iscsi_np context.
4172  */
4173  transport_deregister_session_configfs(sess->se_sess);
4174 
4175  /*
4176  * If any other processes are accessing this session pointer we must
4177  * wait until they have completed. If we are in an interrupt (the
4178  * time2retain handler) and hold an active session usage count, we
4179  * restart the timer and exit.
4180  */
4181  if (!in_interrupt()) {
4182  if (iscsit_check_session_usage_count(sess) == 1)
4183  iscsit_stop_session(sess, 1, 1);
4184  } else {
4185  if (iscsit_check_session_usage_count(sess) == 2) {
4186  atomic_set(&sess->session_logout, 0);
4187  iscsit_start_time2retain_handler(sess);
4188  return 0;
4189  }
4190  }
4191 
4192  transport_deregister_session(sess->se_sess);
4193 
4194  if (sess->sess_ops->ErrorRecoveryLevel == 2)
4195  iscsit_free_connection_recovery_entries(sess);
4196 
4197  iscsit_free_all_ooo_cmdsns(sess);
4198 
4199  spin_lock_bh(&se_tpg->session_lock);
4200  pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4201  sess->session_state = TARG_SESS_STATE_FREE;
4202  pr_debug("Released iSCSI session from node: %s\n",
4203  sess->sess_ops->InitiatorName);
4204  tpg->nsessions--;
4205  if (tpg->tpg_tiqn)
4206  tpg->tpg_tiqn->tiqn_nsessions--;
4207 
4208  pr_debug("Decremented number of active iSCSI Sessions on"
4209  " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
4210 
4211  spin_lock(&sess_idr_lock);
4212  idr_remove(&sess_idr, sess->session_index);
4213  spin_unlock(&sess_idr_lock);
4214 
4215  kfree(sess->sess_ops);
4216  sess->sess_ops = NULL;
4217  spin_unlock_bh(&se_tpg->session_lock);
4218 
4219  kfree(sess);
4220  return 0;
4221 }
4222 
4223 static void iscsit_logout_post_handler_closesession(
4224  struct iscsi_conn *conn)
4225 {
4226  struct iscsi_session *sess = conn->sess;
4227 
4228  iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
4229  iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
4230 
4231  atomic_set(&conn->conn_logout_remove, 0);
4232  complete(&conn->conn_logout_comp);
4233 
4234  iscsit_dec_conn_usage_count(conn);
4235  iscsit_stop_session(sess, 1, 1);
4236  iscsit_dec_session_usage_count(sess);
4237  target_put_session(sess->se_sess);
4238 }
4239 
4240 static void iscsit_logout_post_handler_samecid(
4241  struct iscsi_conn *conn)
4242 {
4243  iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
4244  iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
4245 
4246  atomic_set(&conn->conn_logout_remove, 0);
4247  complete(&conn->conn_logout_comp);
4248 
4249  iscsit_cause_connection_reinstatement(conn, 1);
4250  iscsit_dec_conn_usage_count(conn);
4251 }
4252 
4253 static void iscsit_logout_post_handler_diffcid(
4254  struct iscsi_conn *conn,
4255  u16 cid)
4256 {
4257  struct iscsi_conn *l_conn;
4258  struct iscsi_session *sess = conn->sess;
4259 
4260  if (!sess)
4261  return;
4262 
4263  spin_lock_bh(&sess->conn_lock);
4264  list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
4265  if (l_conn->cid == cid) {
4266  iscsit_inc_conn_usage_count(l_conn);
4267  break;
4268  }
4269  }
4270  spin_unlock_bh(&sess->conn_lock);
4271 
4272  if (!l_conn)
4273  return;
4274 
4275  if (l_conn->sock)
4276  l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
4277 
4278  spin_lock_bh(&l_conn->state_lock);
4279  pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
4280  l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
4281  spin_unlock_bh(&l_conn->state_lock);
4282 
4283  iscsit_cause_connection_reinstatement(l_conn, 1);
4284  iscsit_dec_conn_usage_count(l_conn);
4285 }
4286 
4287 /*
4288  * Return of 0 causes the TX thread to restart.
4289  */
4290 static int iscsit_logout_post_handler(
4291  struct iscsi_cmd *cmd,
4292  struct iscsi_conn *conn)
4293 {
4294  int ret = 0;
4295 
4296  switch (cmd->logout_reason) {
4297  case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
4298  switch (cmd->logout_response) {
4299  case ISCSI_LOGOUT_SUCCESS:
4300  case ISCSI_LOGOUT_CLEANUP_FAILED:
4301  default:
4302  iscsit_logout_post_handler_closesession(conn);
4303  break;
4304  }
4305  ret = 0;
4306  break;
4307  case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
4308  if (conn->cid == cmd->logout_cid) {
4309  switch (cmd->logout_response) {
4310  case ISCSI_LOGOUT_SUCCESS:
4311  case ISCSI_LOGOUT_CLEANUP_FAILED:
4312  default:
4313  iscsit_logout_post_handler_samecid(conn);
4314  break;
4315  }
4316  ret = 0;
4317  } else {
4318  switch (cmd->logout_response) {
4319  case ISCSI_LOGOUT_SUCCESS:
4320  iscsit_logout_post_handler_diffcid(conn,
4321  cmd->logout_cid);
4322  break;
4323  case ISCSI_LOGOUT_CID_NOT_FOUND:
4324  case ISCSI_LOGOUT_CLEANUP_FAILED:
4325  default:
4326  break;
4327  }
4328  ret = 1;
4329  }
4330  break;
4331  case ISCSI_LOGOUT_REASON_RECOVERY:
4332  switch (cmd->logout_response) {
4333  case ISCSI_LOGOUT_SUCCESS:
4334  case ISCSI_LOGOUT_CID_NOT_FOUND:
4335  case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
4336  case ISCSI_LOGOUT_CLEANUP_FAILED:
4337  default:
4338  break;
4339  }
4340  ret = 1;
4341  break;
4342  default:
4343  break;
4344 
4345  }
4346  return ret;
4347 }
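/*
 * Illustrative sketch (not part of the upstream iscsi_target.c): how a
 * TX-side caller interprets the return value documented above. A return of
 * 0 means the TX thread should be restarted (the connection or session is
 * being torn down); 1 means this connection keeps running. The wrapper name
 * is hypothetical.
 */
static inline bool iscsit_logout_restarts_tx_thread(struct iscsi_cmd *cmd,
						    struct iscsi_conn *conn)
{
	return iscsit_logout_post_handler(cmd, conn) == 0;
}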
4348 
4349 void iscsit_fail_session(struct iscsi_session *sess)
4350 {
4351  struct iscsi_conn *conn;
4352 
4353  spin_lock_bh(&sess->conn_lock);
4354  list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
4355  pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
4356  conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
4357  }
4358  spin_unlock_bh(&sess->conn_lock);
4359 
4360  pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4361  sess->session_state = TARG_SESS_STATE_FAILED;
4362 }
4363 
4364 int iscsit_free_session(struct iscsi_session *sess)
4365 {
4366  u16 conn_count = atomic_read(&sess->nconn);
4367  struct iscsi_conn *conn, *conn_tmp = NULL;
4368  int is_last;
4369 
4370  spin_lock_bh(&sess->conn_lock);
4371  atomic_set(&sess->sleep_on_sess_wait_comp, 1);
4372 
4373  list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4374  conn_list) {
4375  if (conn_count == 0)
4376  break;
4377 
4378  if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4379  is_last = 1;
4380  } else {
4381  iscsit_inc_conn_usage_count(conn_tmp);
4382  is_last = 0;
4383  }
4384  iscsit_inc_conn_usage_count(conn);
4385 
4386  spin_unlock_bh(&sess->conn_lock);
4387  iscsit_cause_connection_reinstatement(conn, 1);
4388  spin_lock_bh(&sess->conn_lock);
4389 
4390  iscsit_dec_conn_usage_count(conn);
4391  if (is_last == 0)
4392  iscsit_dec_conn_usage_count(conn_tmp);
4393 
4394  conn_count--;
4395  }
4396 
4397  if (atomic_read(&sess->nconn)) {
4398  spin_unlock_bh(&sess->conn_lock);
4399  wait_for_completion(&sess->session_wait_comp);
4400  } else
4401  spin_unlock_bh(&sess->conn_lock);
4402 
4403  target_put_session(sess->se_sess);
4404  return 0;
4405 }
4406 
4407 void iscsit_stop_session(
4408  struct iscsi_session *sess,
4409  int session_sleep,
4410  int connection_sleep)
4411 {
4412  u16 conn_count = atomic_read(&sess->nconn);
4413  struct iscsi_conn *conn, *conn_tmp = NULL;
4414  int is_last;
4415 
4416  spin_lock_bh(&sess->conn_lock);
4417  if (session_sleep)
4418  atomic_set(&sess->sleep_on_sess_wait_comp, 1);
4419 
4420  if (connection_sleep) {
4421  list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4422  conn_list) {
4423  if (conn_count == 0)
4424  break;
4425 
4426  if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4427  is_last = 1;
4428  } else {
4429  iscsit_inc_conn_usage_count(conn_tmp);
4430  is_last = 0;
4431  }
4432  iscsit_inc_conn_usage_count(conn);
4433 
4434  spin_unlock_bh(&sess->conn_lock);
4435  iscsit_cause_connection_reinstatement(conn, 1);
4436  spin_lock_bh(&sess->conn_lock);
4437 
4438  iscsit_dec_conn_usage_count(conn);
4439  if (is_last == 0)
4440  iscsit_dec_conn_usage_count(conn_tmp);
4441  conn_count--;
4442  }
4443  } else {
4444  list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
4445  iscsit_cause_connection_reinstatement(conn, 0);
4446  }
4447 
4448  if (session_sleep && atomic_read(&sess->nconn)) {
4449  spin_unlock_bh(&sess->conn_lock);
4450  wait_for_completion(&sess->session_wait_comp);
4451  } else
4452  spin_unlock_bh(&sess->conn_lock);
4453 }
4454 
4455 int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4456 {
4457  struct iscsi_session *sess;
4458  struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4459  struct se_session *se_sess, *se_sess_tmp;
4460  int session_count = 0;
4461 
4462  spin_lock_bh(&se_tpg->session_lock);
4463  if (tpg->nsessions && !force) {
4464  spin_unlock_bh(&se_tpg->session_lock);
4465  return -1;
4466  }
4467 
4468  list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
4469  sess_list) {
4470  sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4471 
4472  spin_lock(&sess->conn_lock);
4473  if (atomic_read(&sess->session_fall_back_to_erl0) ||
4474  atomic_read(&sess->session_logout) ||
4475  (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
4476  spin_unlock(&sess->conn_lock);
4477  continue;
4478  }
4479  atomic_set(&sess->session_reinstatement, 1);
4480  spin_unlock(&sess->conn_lock);
4481  spin_unlock_bh(&se_tpg->session_lock);
4482 
4483  iscsit_free_session(sess);
4484  spin_lock_bh(&se_tpg->session_lock);
4485 
4486  session_count++;
4487  }
4488  spin_unlock_bh(&se_tpg->session_lock);
4489 
4490  pr_debug("Released %d iSCSI Session(s) from Target Portal"
4491  " Group: %hu\n", session_count, tpg->tpgt);
4492  return 0;
4493 }
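/*
 * Illustrative sketch (not part of the upstream iscsi_target.c): typical use
 * of iscsit_release_sessions_for_tpg() from TPG teardown code. A non-forced
 * call fails with -1 while sessions still exist; passing force != 0 shuts
 * the remaining sessions down. The caller name is hypothetical.
 */
static int example_tpg_release(struct iscsi_portal_group *tpg)
{
	/* First try a polite release; it fails if sessions remain. */
	if (iscsit_release_sessions_for_tpg(tpg, 0) == 0)
		return 0;

	/* Force the release, tearing down every remaining session. */
	return iscsit_release_sessions_for_tpg(tpg, 1);
}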
4494 
4495 MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
4496 MODULE_VERSION("4.1.x");
4498 MODULE_LICENSE("GPL");
4499 
4500 module_init(iscsi_target_init_module);
4501 module_exit(iscsi_target_cleanup_module);