Linux Kernel 3.7.1
sbp_target.c
1 /*
2  * SBP2 target driver (SCSI over IEEE1394 in target mode)
3  *
4  * Copyright (C) 2011 Chris Boot <[email protected]>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software Foundation,
18  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19  */
20 
21 #define KMSG_COMPONENT "sbp_target"
22 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
23 
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/init.h>
27 #include <linux/types.h>
28 #include <linux/string.h>
29 #include <linux/configfs.h>
30 #include <linux/ctype.h>
31 #include <linux/firewire.h>
32 #include <linux/firewire-constants.h>
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_tcq.h>
35 #include <target/target_core_base.h>
36 #include <target/target_core_backend.h>
37 #include <target/target_core_fabric.h>
38 #include <target/target_core_fabric_configfs.h>
39 #include <target/target_core_configfs.h>
40 #include <target/configfs_macros.h>
41 #include <asm/unaligned.h>
42 
43 #include "sbp_target.h"
44 
45 /* Local pointer to allocated TCM configfs fabric module */
46 static struct target_fabric_configfs *sbp_fabric_configfs;
47 
48 /* FireWire address region for management and command block address handlers */
49 static const struct fw_address_region sbp_register_region = {
50  .start = CSR_REGISTER_BASE + 0x10000,
51  .end = 0x1000000000000ULL,
52 };
53 
54 static const u32 sbp_unit_directory_template[] = {
55  0x1200609e, /* unit_specifier_id: NCITS/T10 */
56  0x13010483, /* unit_sw_version: 1155D Rev 4 */
57  0x3800609e, /* command_set_specifier_id: NCITS/T10 */
58  0x390104d8, /* command_set: SPC-2 */
59  0x3b000000, /* command_set_revision: 0 */
60  0x3c000001, /* firmware_revision: 1 */
61 };
62 
63 #define SESSION_MAINTENANCE_INTERVAL HZ
64 
65 static atomic_t login_id = ATOMIC_INIT(0);
66 
67 static void session_maintenance_work(struct work_struct *);
68 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
69  unsigned long long, void *, size_t);
70 
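/*
 * Fetch the peer's EUI-64 (GUID) with two quadlet reads from the bus
 * information block of its config ROM.
 */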
71 static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
72 {
73  int ret;
74  __be32 high, low;
75 
76  ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
77  req->node_addr, req->generation, req->speed,
78  (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
79  &high, sizeof(high));
80  if (ret != RCODE_COMPLETE)
81  return ret;
82 
83  ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
84  req->node_addr, req->generation, req->speed,
85  (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
86  &low, sizeof(low));
87  if (ret != RCODE_COMPLETE)
88  return ret;
89 
90  *guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
91 
92  return RCODE_COMPLETE;
93 }
94 
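/*
 * Walk the TPG's session list looking for an existing session for this
 * initiator GUID; returns NULL if the initiator has no session yet.
 */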
95 static struct sbp_session *sbp_session_find_by_guid(
96  struct sbp_tpg *tpg, u64 guid)
97 {
98  struct se_session *se_sess;
99  struct sbp_session *sess, *found = NULL;
100 
101  spin_lock_bh(&tpg->se_tpg.session_lock);
102  list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
103  sess = se_sess->fabric_sess_ptr;
104  if (sess->guid == guid)
105  found = sess;
106  }
107  spin_unlock_bh(&tpg->se_tpg.session_lock);
108 
109  return found;
110 }
111 
112 static struct sbp_login_descriptor *sbp_login_find_by_lun(
113  struct sbp_session *session, struct se_lun *lun)
114 {
115  struct sbp_login_descriptor *login, *found = NULL;
116 
117  spin_lock_bh(&session->lock);
118  list_for_each_entry(login, &session->login_list, link) {
119  if (login->lun == lun)
120  found = login;
121  }
122  spin_unlock_bh(&session->lock);
123 
124  return found;
125 }
126 
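/*
 * Count the logins on a LUN across every session on the TPG; with
 * @exclusive set, only logins holding the exclusive bit are counted.
 */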
127 static int sbp_login_count_all_by_lun(
128  struct sbp_tpg *tpg,
129  struct se_lun *lun,
130  int exclusive)
131 {
132  struct se_session *se_sess;
133  struct sbp_session *sess;
134  struct sbp_login_descriptor *login;
135  int count = 0;
136 
137  spin_lock_bh(&tpg->se_tpg.session_lock);
138  list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
139  sess = se_sess->fabric_sess_ptr;
140 
141  spin_lock_bh(&sess->lock);
142  list_for_each_entry(login, &sess->login_list, link) {
143  if (login->lun != lun)
144  continue;
145 
146  if (!exclusive || login->exclusive)
147  count++;
148  }
149  spin_unlock_bh(&sess->lock);
150  }
151  spin_unlock_bh(&tpg->se_tpg.session_lock);
152 
153  return count;
154 }
155 
156 static struct sbp_login_descriptor *sbp_login_find_by_id(
157  struct sbp_tpg *tpg, int login_id)
158 {
159  struct se_session *se_sess;
160  struct sbp_session *sess;
161  struct sbp_login_descriptor *login, *found = NULL;
162 
163  spin_lock_bh(&tpg->se_tpg.session_lock);
164  list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
165  sess = se_sess->fabric_sess_ptr;
166 
167  spin_lock_bh(&sess->lock);
168  list_for_each_entry(login, &sess->login_list, link) {
169  if (login->login_id == login_id)
170  found = login;
171  }
172  spin_unlock_bh(&sess->lock);
173  }
174  spin_unlock_bh(&tpg->se_tpg.session_lock);
175 
176  return found;
177 }
178 
179 static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
180 {
181  struct se_portal_group *se_tpg = &tpg->se_tpg;
182  struct se_lun *se_lun;
183 
184  if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
185  return ERR_PTR(-EINVAL);
186 
187  spin_lock(&se_tpg->tpg_lun_lock);
188  se_lun = se_tpg->tpg_lun_list[lun];
189 
190  if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
191  se_lun = ERR_PTR(-ENODEV);
192 
193  spin_unlock(&se_tpg->tpg_lun_lock);
194 
195  return se_lun;
196 }
197 
198 static struct sbp_session *sbp_session_create(
199  struct sbp_tpg *tpg,
200  u64 guid)
201 {
202  struct sbp_session *sess;
203  int ret;
204  char guid_str[17];
205  struct se_node_acl *se_nacl;
206 
207  sess = kmalloc(sizeof(*sess), GFP_KERNEL);
208  if (!sess) {
209  pr_err("failed to allocate session descriptor\n");
210  return ERR_PTR(-ENOMEM);
211  }
212 
213  sess->se_sess = transport_init_session();
214  if (IS_ERR(sess->se_sess)) {
215  pr_err("failed to init se_session\n");
216 
217  ret = PTR_ERR(sess->se_sess);
218  kfree(sess);
219  return ERR_PTR(ret);
220  }
221 
222  snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
223 
224  se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
225  if (!se_nacl) {
226  pr_warn("Node ACL not found for %s\n", guid_str);
227 
228  transport_free_session(sess->se_sess);
229  kfree(sess);
230 
231  return ERR_PTR(-EPERM);
232  }
233 
234  sess->se_sess->se_node_acl = se_nacl;
235 
236  spin_lock_init(&sess->lock);
237  INIT_LIST_HEAD(&sess->login_list);
238  INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
239 
240  sess->guid = guid;
241 
242  transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
243 
244  return sess;
245 }
246 
247 static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
248 {
249  spin_lock_bh(&sess->lock);
250  if (!list_empty(&sess->login_list)) {
251  spin_unlock_bh(&sess->lock);
252  return;
253  }
254  spin_unlock_bh(&sess->lock);
255 
256  if (cancel_work)
257  cancel_delayed_work_sync(&sess->maint_work);
258 
259  transport_deregister_session_configfs(sess->se_sess);
260  transport_deregister_session(sess->se_sess);
261 
262  if (sess->card)
263  fw_card_put(sess->card);
264 
265  kfree(sess);
266 }
267 
268 static void sbp_target_agent_unregister(struct sbp_target_agent *);
269 
270 static void sbp_login_release(struct sbp_login_descriptor *login,
271  bool cancel_work)
272 {
273  struct sbp_session *sess = login->sess;
274 
275  /* FIXME: abort/wait on tasks */
276 
277  sbp_target_agent_unregister(login->tgt_agt);
278 
279  if (sess) {
280  spin_lock_bh(&sess->lock);
281  list_del(&login->link);
282  spin_unlock_bh(&sess->lock);
283 
284  sbp_session_release(sess, cancel_work);
285  }
286 
287  kfree(login);
288 }
289 
290 static struct sbp_target_agent *sbp_target_agent_register(
291  struct sbp_login_descriptor *);
292 
293 static void sbp_management_request_login(
294  struct sbp_management_agent *agent, struct sbp_management_request *req,
295  int *status_data_size)
296 {
297  struct sbp_tport *tport = agent->tport;
298  struct sbp_tpg *tpg = tport->tpg;
299  struct se_lun *se_lun;
300  int ret;
301  u64 guid;
302  struct sbp_session *sess;
303  struct sbp_login_descriptor *login;
304  struct sbp_login_response_block *response;
305  int login_response_len;
306 
307  se_lun = sbp_get_lun_from_tpg(tpg,
308  LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
309  if (IS_ERR(se_lun)) {
310  pr_notice("login to unknown LUN: %d\n",
311  LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
312 
313  req->status.status = cpu_to_be32(
314  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
315  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
316  return;
317  }
318 
319  ret = read_peer_guid(&guid, req);
320  if (ret != RCODE_COMPLETE) {
321  pr_warn("failed to read peer GUID: %d\n", ret);
322 
323  req->status.status = cpu_to_be32(
324  STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
325  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
326  return;
327  }
328 
329  pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
330  se_lun->unpacked_lun, guid);
331 
332  sess = sbp_session_find_by_guid(tpg, guid);
333  if (sess) {
334  login = sbp_login_find_by_lun(sess, se_lun);
335  if (login) {
336  pr_notice("initiator already logged-in\n");
337 
338  /*
339  * SBP-2 R4 says we should return access denied, but
340  * that can confuse initiators. Instead we need to
341  * treat this like a reconnect, but send the login
342  * response block like a fresh login.
343  *
344  * This is required particularly in the case of Apple
345  * devices booting off the FireWire target, where
346  * the firmware has an active login to the target. When
347  * the OS takes control of the session it issues its own
348  * LOGIN rather than a RECONNECT. To avoid the machine
349  * waiting until the reconnect_hold expires, we can skip
350  * the ACCESS_DENIED errors to speed things up.
351  */
352 
353  goto already_logged_in;
354  }
355  }
356 
357  /*
358  * check exclusive bit in login request
359  * reject with access_denied if any logins present
360  */
361  if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
362  sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
363  pr_warn("refusing exclusive login with other active logins\n");
364 
365  req->status.status = cpu_to_be32(
366  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
367  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
368  return;
369  }
370 
371  /*
372  * check exclusive bit in any existing login descriptor
373  * reject with access_denied if any exclusive logins present
374  */
375  if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
376  pr_warn("refusing login while another exclusive login present\n");
377 
378  req->status.status = cpu_to_be32(
379  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
380  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
381  return;
382  }
383 
384  /*
385  * check we haven't exceeded the number of allowed logins
386  * reject with resources_unavailable if we have
387  */
388  if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
389  tport->max_logins_per_lun) {
390  pr_warn("max number of logins reached\n");
391 
392  req->status.status = cpu_to_be32(
393  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
394  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
395  return;
396  }
397 
398  if (!sess) {
399  sess = sbp_session_create(tpg, guid);
400  if (IS_ERR(sess)) {
401  switch (PTR_ERR(sess)) {
402  case -EPERM:
403  ret = SBP_STATUS_ACCESS_DENIED;
404  break;
405  default:
406  ret = SBP_STATUS_RESOURCES_UNAVAIL;
407  break;
408  }
409 
410  req->status.status = cpu_to_be32(
411  STATUS_BLOCK_RESP(
412  STATUS_RESP_REQUEST_COMPLETE) |
413  STATUS_BLOCK_SBP_STATUS(ret));
414  return;
415  }
416 
417  sess->node_id = req->node_addr;
418  sess->card = fw_card_get(req->card);
419  sess->generation = req->generation;
420  sess->speed = req->speed;
421 
422  schedule_delayed_work(&sess->maint_work,
423  SESSION_MAINTENANCE_INTERVAL);
424  }
425 
426  /* only take the latest reconnect_hold into account */
427  sess->reconnect_hold = min(
428  1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
429  tport->max_reconnect_timeout) - 1;
430 
431  login = kmalloc(sizeof(*login), GFP_KERNEL);
432  if (!login) {
433  pr_err("failed to allocate login descriptor\n");
434 
435  sbp_session_release(sess, true);
436 
437  req->status.status = cpu_to_be32(
438  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
439  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
440  return;
441  }
442 
443  login->sess = sess;
444  login->lun = se_lun;
445  login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
446  login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
447  login->login_id = atomic_inc_return(&login_id);
448 
449  login->tgt_agt = sbp_target_agent_register(login);
450  if (IS_ERR(login->tgt_agt)) {
451  ret = PTR_ERR(login->tgt_agt);
452  pr_err("failed to map command block handler: %d\n", ret);
453 
454  sbp_session_release(sess, true);
455  kfree(login);
456 
457  req->status.status = cpu_to_be32(
458  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
459  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
460  return;
461  }
462 
463  spin_lock_bh(&sess->lock);
464  list_add_tail(&login->link, &sess->login_list);
465  spin_unlock_bh(&sess->lock);
466 
467 already_logged_in:
468  response = kzalloc(sizeof(*response), GFP_KERNEL);
469  if (!response) {
470  pr_err("failed to allocate login response block\n");
471 
472  sbp_login_release(login, true);
473 
474  req->status.status = cpu_to_be32(
475  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
476  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
477  return;
478  }
479 
480  login_response_len = clamp_val(
481  LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
482  12, sizeof(*response));
483  response->misc = cpu_to_be32(
484  ((login_response_len & 0xffff) << 16) |
485  (login->login_id & 0xffff));
486  response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
487  addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
488  &response->command_block_agent);
489 
490  ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
491  sess->node_id, sess->generation, sess->speed,
492  sbp2_pointer_to_addr(&req->orb.ptr2), response,
493  login_response_len);
494  if (ret != RCODE_COMPLETE) {
495  pr_debug("failed to write login response block: %x\n", ret);
496 
497  kfree(response);
498  sbp_login_release(login, true);
499 
500  req->status.status = cpu_to_be32(
501  STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
502  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
503  return;
504  }
505 
506  kfree(response);
507 
508  req->status.status = cpu_to_be32(
509  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
510  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
511 }
512 
513 static void sbp_management_request_query_logins(
514  struct sbp_management_agent *agent, struct sbp_management_request *req,
515  int *status_data_size)
516 {
517  pr_notice("QUERY LOGINS not implemented\n");
518  /* FIXME: implement */
519 
520  req->status.status = cpu_to_be32(
521  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
522  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_FUNCTION_REJECTED));
523 }
524 
525 static void sbp_management_request_reconnect(
526  struct sbp_management_agent *agent, struct sbp_management_request *req,
527  int *status_data_size)
528 {
529  struct sbp_tport *tport = agent->tport;
530  struct sbp_tpg *tpg = tport->tpg;
531  int ret;
532  u64 guid;
533  struct sbp_login_descriptor *login;
534 
535  ret = read_peer_guid(&guid, req);
536  if (ret != RCODE_COMPLETE) {
537  pr_warn("failed to read peer GUID: %d\n", ret);
538 
539  req->status.status = cpu_to_be32(
540  STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
541  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
542  return;
543  }
544 
545  pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
546 
547  login = sbp_login_find_by_id(tpg,
548  RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
549 
550  if (!login) {
551  pr_err("mgt_agent RECONNECT unknown login ID\n");
552 
553  req->status.status = cpu_to_be32(
554  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
555  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
556  return;
557  }
558 
559  if (login->sess->guid != guid) {
560  pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
561 
562  req->status.status = cpu_to_be32(
563  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
564  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
565  return;
566  }
567 
568  spin_lock_bh(&login->sess->lock);
569  if (login->sess->card)
570  fw_card_put(login->sess->card);
571 
572  /* update the node details */
573  login->sess->generation = req->generation;
574  login->sess->node_id = req->node_addr;
575  login->sess->card = fw_card_get(req->card);
576  login->sess->speed = req->speed;
577  spin_unlock_bh(&login->sess->lock);
578 
579  req->status.status = cpu_to_be32(
580  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
581  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
582 }
583 
584 static void sbp_management_request_logout(
585  struct sbp_management_agent *agent, struct sbp_management_request *req,
586  int *status_data_size)
587 {
588  struct sbp_tport *tport = agent->tport;
589  struct sbp_tpg *tpg = tport->tpg;
590  int id;
591  struct sbp_login_descriptor *login;
592 
593  id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
594 
595  login = sbp_login_find_by_id(tpg, id);
596  if (!login) {
597  pr_warn("cannot find login: %d\n", id);
598 
599  req->status.status = cpu_to_be32(
600  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
601  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
602  return;
603  }
604 
605  pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
606  login->lun->unpacked_lun, login->login_id);
607 
608  if (req->node_addr != login->sess->node_id) {
609  pr_warn("logout from different node ID\n");
610 
611  req->status.status = cpu_to_be32(
612  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
613  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
614  return;
615  }
616 
617  sbp_login_release(login, true);
618 
619  req->status.status = cpu_to_be32(
620  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
621  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
622 }
623 
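/*
 * A bus reset bumps the card generation. If this session's generation is
 * stale (or its card is gone), invalidate the node ID and start the
 * reconnect_hold countdown.
 */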
624 static void session_check_for_reset(struct sbp_session *sess)
625 {
626  bool card_valid = false;
627 
628  spin_lock_bh(&sess->lock);
629 
630  if (sess->card) {
631  spin_lock_irq(&sess->card->lock);
632  card_valid = (sess->card->local_node != NULL);
633  spin_unlock_irq(&sess->card->lock);
634 
635  if (!card_valid) {
636  fw_card_put(sess->card);
637  sess->card = NULL;
638  }
639  }
640 
641  if (!card_valid || (sess->generation != sess->card->generation)) {
642  pr_info("Waiting for reconnect from node: %016llx\n",
643  sess->guid);
644 
645  sess->node_id = -1;
646  sess->reconnect_expires = get_jiffies_64() +
647  ((sess->reconnect_hold + 1) * HZ);
648  }
649 
650  spin_unlock_bh(&sess->lock);
651 }
652 
653 static void session_reconnect_expired(struct sbp_session *sess)
654 {
655  struct sbp_login_descriptor *login, *temp;
656  LIST_HEAD(login_list);
657 
658  pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
659 
660  spin_lock_bh(&sess->lock);
661  list_for_each_entry_safe(login, temp, &sess->login_list, link) {
662  login->sess = NULL;
663  list_move_tail(&login->link, &login_list);
664  }
665  spin_unlock_bh(&sess->lock);
666 
667  list_for_each_entry_safe(login, temp, &login_list, link) {
668  list_del(&login->link);
669  sbp_login_release(login, false);
670  }
671 
672  sbp_session_release(sess, false);
673 }
674 
675 static void session_maintenance_work(struct work_struct *work)
676 {
677  struct sbp_session *sess = container_of(work, struct sbp_session,
678  maint_work.work);
679 
680  /* could be called while tearing down the session */
681  spin_lock_bh(&sess->lock);
682  if (list_empty(&sess->login_list)) {
683  spin_unlock_bh(&sess->lock);
684  return;
685  }
686  spin_unlock_bh(&sess->lock);
687 
688  if (sess->node_id != -1) {
689  /* check for bus reset and make node_id invalid */
690  session_check_for_reset(sess);
691 
692  schedule_delayed_work(&sess->maint_work,
693  SESSION_MAINTENANCE_INTERVAL);
694  } else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
695  /* still waiting for reconnect */
696  schedule_delayed_work(&sess->maint_work,
697  SESSION_MAINTENANCE_INTERVAL);
698  } else {
699  /* reconnect timeout has expired */
700  session_reconnect_expired(sess);
701  }
702 }
703 
704 static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
705  struct sbp_target_agent *agent)
706 {
707  __be32 state;
708 
709  switch (tcode) {
710  case TCODE_READ_QUADLET_REQUEST:
711  pr_debug("tgt_agent AGENT_STATE READ\n");
712 
713  spin_lock_bh(&agent->lock);
714  state = cpu_to_be32(agent->state);
715  spin_unlock_bh(&agent->lock);
716  memcpy(data, &state, sizeof(state));
717 
718  return RCODE_COMPLETE;
719 
720  case TCODE_WRITE_QUADLET_REQUEST:
721  /* ignored */
722  return RCODE_COMPLETE;
723 
724  default:
725  return RCODE_TYPE_ERROR;
726  }
727 }
728 
729 static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
730  struct sbp_target_agent *agent)
731 {
732  switch (tcode) {
733  case TCODE_WRITE_QUADLET_REQUEST:
734  pr_debug("tgt_agent AGENT_RESET\n");
735  spin_lock_bh(&agent->lock);
736  agent->state = AGENT_STATE_RESET;
737  spin_unlock_bh(&agent->lock);
738  return RCODE_COMPLETE;
739 
740  default:
741  return RCODE_TYPE_ERROR;
742  }
743 }
744 
745 static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
746  struct sbp_target_agent *agent)
747 {
748  struct sbp2_pointer *ptr = data;
749 
750  switch (tcode) {
751  case TCODE_WRITE_BLOCK_REQUEST:
752  spin_lock_bh(&agent->lock);
753  if (agent->state != AGENT_STATE_SUSPENDED &&
754  agent->state != AGENT_STATE_RESET) {
755  spin_unlock_bh(&agent->lock);
756  pr_notice("Ignoring ORB_POINTER write while active.\n");
757  return RCODE_CONFLICT_ERROR;
758  }
759  agent->state = AGENT_STATE_ACTIVE;
760  spin_unlock_bh(&agent->lock);
761 
762  agent->orb_pointer = sbp2_pointer_to_addr(ptr);
763  agent->doorbell = false;
764 
765  pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
766  agent->orb_pointer);
767 
768  queue_work(system_unbound_wq, &agent->work);
769 
770  return RCODE_COMPLETE;
771 
772  case TCODE_READ_BLOCK_REQUEST:
773  pr_debug("tgt_agent ORB_POINTER READ\n");
774  spin_lock_bh(&agent->lock);
775  addr_to_sbp2_pointer(agent->orb_pointer, ptr);
776  spin_unlock_bh(&agent->lock);
777  return RCODE_COMPLETE;
778 
779  default:
780  return RCODE_TYPE_ERROR;
781  }
782 }
783 
784 static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
785  struct sbp_target_agent *agent)
786 {
787  switch (tcode) {
788  case TCODE_WRITE_QUADLET_REQUEST:
789  spin_lock_bh(&agent->lock);
790  if (agent->state != AGENT_STATE_SUSPENDED) {
791  spin_unlock_bh(&agent->lock);
792  pr_debug("Ignoring DOORBELL while active.\n");
793  return RCODE_CONFLICT_ERROR;
794  }
795  agent->state = AGENT_STATE_ACTIVE;
796  spin_unlock_bh(&agent->lock);
797 
798  agent->doorbell = true;
799 
800  pr_debug("tgt_agent DOORBELL\n");
801 
802  queue_work(system_unbound_wq, &agent->work);
803 
804  return RCODE_COMPLETE;
805 
806  case TCODE_READ_QUADLET_REQUEST:
807  return RCODE_COMPLETE;
808 
809  default:
810  return RCODE_TYPE_ERROR;
811  }
812 }
813 
814 static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
815  int tcode, void *data, struct sbp_target_agent *agent)
816 {
817  switch (tcode) {
818  case TCODE_WRITE_QUADLET_REQUEST:
819  pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
820  /* ignored as we don't send unsolicited status */
821  return RCODE_COMPLETE;
822 
823  case TCODE_READ_QUADLET_REQUEST:
824  return RCODE_COMPLETE;
825 
826  default:
827  return RCODE_TYPE_ERROR;
828  }
829 }
830 
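/*
 * Request handler for the per-login command block agent: validates that
 * the request comes from the logged-in node in the current generation,
 * then dispatches on the register offset (AGENT_STATE 0x00, AGENT_RESET
 * 0x04, ORB_POINTER 0x08, DOORBELL 0x10, UNSOLICITED_STATUS_ENABLE 0x14).
 */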
831 static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
832  int tcode, int destination, int source, int generation,
833  unsigned long long offset, void *data, size_t length,
834  void *callback_data)
835 {
836  struct sbp_target_agent *agent = callback_data;
837  struct sbp_session *sess = agent->login->sess;
838  int sess_gen, sess_node, rcode;
839 
840  spin_lock_bh(&sess->lock);
841  sess_gen = sess->generation;
842  sess_node = sess->node_id;
843  spin_unlock_bh(&sess->lock);
844 
845  if (generation != sess_gen) {
846  pr_notice("ignoring request with wrong generation\n");
847  rcode = RCODE_TYPE_ERROR;
848  goto out;
849  }
850 
851  if (source != sess_node) {
852  pr_notice("ignoring request from foreign node (%x != %x)\n",
853  source, sess_node);
854  rcode = RCODE_TYPE_ERROR;
855  goto out;
856  }
857 
858  /* turn offset into the offset from the start of the block */
859  offset -= agent->handler.offset;
860 
861  if (offset == 0x00 && length == 4) {
862  /* AGENT_STATE */
863  rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
864  } else if (offset == 0x04 && length == 4) {
865  /* AGENT_RESET */
866  rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
867  } else if (offset == 0x08 && length == 8) {
868  /* ORB_POINTER */
869  rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
870  } else if (offset == 0x10 && length == 4) {
871  /* DOORBELL */
872  rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
873  } else if (offset == 0x14 && length == 4) {
874  /* UNSOLICITED_STATUS_ENABLE */
875  rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
876  data, agent);
877  } else {
878  rcode = RCODE_ADDRESS_ERROR;
879  }
880 
881 out:
882  fw_send_response(card, request, rcode);
883 }
884 
885 static void sbp_handle_command(struct sbp_target_request *);
886 static int sbp_send_status(struct sbp_target_request *);
887 static void sbp_free_request(struct sbp_target_request *);
888 
889 static void tgt_agent_process_work(struct work_struct *work)
890 {
891  struct sbp_target_request *req =
892  container_of(work, struct sbp_target_request, work);
893 
894  pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
895  req->orb_pointer,
896  sbp2_pointer_to_addr(&req->orb.next_orb),
897  sbp2_pointer_to_addr(&req->orb.data_descriptor),
898  be32_to_cpu(req->orb.misc));
899 
900  if (req->orb_pointer >> 32)
901  pr_debug("ORB with high bits set\n");
902 
903  switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
904  case 0:/* Format specified by this standard */
905  sbp_handle_command(req);
906  return;
907  case 1: /* Reserved for future standardization */
908  case 2: /* Vendor-dependent */
909  req->status.status |= cpu_to_be32(
910  STATUS_BLOCK_RESP(
911  STATUS_RESP_REQUEST_COMPLETE) |
912  STATUS_BLOCK_DEAD(0) |
913  STATUS_BLOCK_LEN(1) |
914  STATUS_BLOCK_SBP_STATUS(
915  SBP_STATUS_REQ_TYPE_NOTSUPP));
916  sbp_send_status(req);
917  sbp_free_request(req);
918  return;
919  case 3: /* Dummy ORB */
920  req->status.status |= cpu_to_be32(
921  STATUS_BLOCK_RESP(
922  STATUS_RESP_REQUEST_COMPLETE) |
923  STATUS_BLOCK_DEAD(0) |
924  STATUS_BLOCK_LEN(1) |
925  STATUS_BLOCK_SBP_STATUS(
926  SBP_STATUS_DUMMY_ORB_COMPLETE));
927  sbp_send_status(req);
928  sbp_free_request(req);
929  return;
930  default:
931  BUG();
932  }
933 }
934 
935 /* used to double-check we haven't been issued an AGENT_RESET */
936 static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
937 {
938  bool active;
939 
940  spin_lock_bh(&agent->lock);
941  active = (agent->state == AGENT_STATE_ACTIVE);
942  spin_unlock_bh(&agent->lock);
943 
944  return active;
945 }
946 
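/*
 * Walk the initiator's ORB list: read each ORB, queue it for processing,
 * and follow next_ORB until the list ends or the agent stops being
 * active. After a doorbell ring the current ORB is not re-executed; only
 * its next_ORB pointer is re-checked.
 */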
947 static void tgt_agent_fetch_work(struct work_struct *work)
948 {
949  struct sbp_target_agent *agent =
950  container_of(work, struct sbp_target_agent, work);
951  struct sbp_session *sess = agent->login->sess;
952  struct sbp_target_request *req;
953  int ret;
954  bool doorbell = agent->doorbell;
955  u64 next_orb = agent->orb_pointer;
956 
957  while (next_orb && tgt_agent_check_active(agent)) {
958  req = kzalloc(sizeof(*req), GFP_KERNEL);
959  if (!req) {
960  spin_lock_bh(&agent->lock);
961  agent->state = AGENT_STATE_DEAD;
962  spin_unlock_bh(&agent->lock);
963  return;
964  }
965 
966  req->login = agent->login;
967  req->orb_pointer = next_orb;
968 
969  req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
970  req->orb_pointer >> 32));
971  req->status.orb_low = cpu_to_be32(
972  req->orb_pointer & 0xfffffffc);
973 
974  /* read in the ORB */
975  ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
976  sess->node_id, sess->generation, sess->speed,
977  req->orb_pointer, &req->orb, sizeof(req->orb));
978  if (ret != RCODE_COMPLETE) {
979  pr_debug("tgt_orb fetch failed: %x\n", ret);
980  req->status.status |= cpu_to_be32(
981  STATUS_BLOCK_SRC(
982  STATUS_SRC_ORB_FINISHED) |
983  STATUS_BLOCK_RESP(
984  STATUS_RESP_TRANSPORT_FAILURE) |
985  STATUS_BLOCK_DEAD(1) |
986  STATUS_BLOCK_LEN(1) |
987  STATUS_BLOCK_SBP_STATUS(
988  SBP_STATUS_UNSPECIFIED_ERROR));
989  spin_lock_bh(&agent->lock);
990  agent->state = AGENT_STATE_DEAD;
991  spin_unlock_bh(&agent->lock);
992 
993  sbp_send_status(req);
994  sbp_free_request(req);
995  return;
996  }
997 
998  /* check the next_ORB field */
999  if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
1000  next_orb = 0;
1001  req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
1002  STATUS_SRC_ORB_FINISHED));
1003  } else {
1004  next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
1005  req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
1006  STATUS_SRC_ORB_CONTINUING));
1007  }
1008 
1009  if (tgt_agent_check_active(agent) && !doorbell) {
1010  INIT_WORK(&req->work, tgt_agent_process_work);
1011  queue_work(system_unbound_wq, &req->work);
1012  } else {
1013  /* don't process this request, just check next_ORB */
1014  sbp_free_request(req);
1015  }
1016 
1017  spin_lock_bh(&agent->lock);
1018  doorbell = agent->doorbell = false;
1019 
1020  /* check if we should carry on processing */
1021  if (next_orb)
1022  agent->orb_pointer = next_orb;
1023  else
1024  agent->state = AGENT_STATE_SUSPENDED;
1025 
1026  spin_unlock_bh(&agent->lock);
1027  }
1028 }
1029 
1030 static struct sbp_target_agent *sbp_target_agent_register(
1031  struct sbp_login_descriptor *login)
1032 {
1033  struct sbp_target_agent *agent;
1034  int ret;
1035 
1036  agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1037  if (!agent)
1038  return ERR_PTR(-ENOMEM);
1039 
1040  spin_lock_init(&agent->lock);
1041 
1042  agent->handler.length = 0x20;
1043  agent->handler.address_callback = tgt_agent_rw;
1044  agent->handler.callback_data = agent;
1045 
1046  agent->login = login;
1047  agent->state = AGENT_STATE_RESET;
1048  INIT_WORK(&agent->work, tgt_agent_fetch_work);
1049  agent->orb_pointer = 0;
1050  agent->doorbell = false;
1051 
1052  ret = fw_core_add_address_handler(&agent->handler,
1053  &sbp_register_region);
1054  if (ret < 0) {
1055  kfree(agent);
1056  return ERR_PTR(ret);
1057  }
1058 
1059  return agent;
1060 }
1061 
1062 static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
1063 {
1064  fw_core_remove_address_handler(&agent->handler);
1065  cancel_work_sync(&agent->work);
1066  kfree(agent);
1067 }
1068 
1069 /*
1070  * Simple wrapper around fw_run_transaction that retries the transaction several
1071  * times in case of failure, with an exponential backoff.
1072  */
1073 static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
1074  int generation, int speed, unsigned long long offset,
1075  void *payload, size_t length)
1076 {
1077  int attempt, ret, delay;
1078 
1079  for (attempt = 1; attempt <= 5; attempt++) {
1080  ret = fw_run_transaction(card, tcode, destination_id,
1081  generation, speed, offset, payload, length);
1082 
1083  switch (ret) {
1084  case RCODE_COMPLETE:
1085  case RCODE_TYPE_ERROR:
1086  case RCODE_ADDRESS_ERROR:
1087  case RCODE_GENERATION:
1088  return ret;
1089 
1090  default:
1091  delay = 5 * attempt * attempt;
1092  usleep_range(delay, delay * 2);
1093  }
1094  }
1095 
1096  return ret;
1097 }
1098 
1099 /*
1100  * Wrapper around sbp_run_transaction that gets the card, destination,
1101  * generation and speed out of the request's session.
1102  */
1103 static int sbp_run_request_transaction(struct sbp_target_request *req,
1104  int tcode, unsigned long long offset, void *payload,
1105  size_t length)
1106 {
1107  struct sbp_login_descriptor *login = req->login;
1108  struct sbp_session *sess = login->sess;
1109  struct fw_card *card;
1110  int node_id, generation, speed, ret;
1111 
1112  spin_lock_bh(&sess->lock);
1113  card = fw_card_get(sess->card);
1114  node_id = sess->node_id;
1115  generation = sess->generation;
1116  speed = sess->speed;
1117  spin_unlock_bh(&sess->lock);
1118 
1119  ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
1120  offset, payload, length);
1121 
1122  fw_card_put(card);
1123 
1124  return ret;
1125 }
1126 
1127 static int sbp_fetch_command(struct sbp_target_request *req)
1128 {
1129  int ret, cmd_len, copy_len;
1130 
1131  cmd_len = scsi_command_size(req->orb.command_block);
1132 
1133  req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
1134  if (!req->cmd_buf)
1135  return -ENOMEM;
1136 
1137  memcpy(req->cmd_buf, req->orb.command_block,
1138  min_t(int, cmd_len, sizeof(req->orb.command_block)));
1139 
1140  if (cmd_len > sizeof(req->orb.command_block)) {
1141  pr_debug("sbp_fetch_command: filling in long command\n");
1142  copy_len = cmd_len - sizeof(req->orb.command_block);
1143 
1144  ret = sbp_run_request_transaction(req,
1145  TCODE_READ_BLOCK_REQUEST,
1146  req->orb_pointer + sizeof(req->orb),
1147  req->cmd_buf + sizeof(req->orb.command_block),
1148  copy_len);
1149  if (ret != RCODE_COMPLETE)
1150  return -EIO;
1151  }
1152 
1153  return 0;
1154 }
1155 
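/*
 * If the ORB carries a page table, read it from the initiator. The table
 * is an array of {segment_base, segment_length} entries describing the
 * data buffer; data_size in the ORB gives the number of entries.
 */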
1156 static int sbp_fetch_page_table(struct sbp_target_request *req)
1157 {
1158  int pg_tbl_sz, ret;
1159  struct sbp_page_table_entry *pg_tbl;
1160 
1161  if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
1162  return 0;
1163 
1164  pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
1165  sizeof(struct sbp_page_table_entry);
1166 
1167  pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
1168  if (!pg_tbl)
1169  return -ENOMEM;
1170 
1171  ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
1172  sbp2_pointer_to_addr(&req->orb.data_descriptor),
1173  pg_tbl, pg_tbl_sz);
1174  if (ret != RCODE_COMPLETE) {
1175  kfree(pg_tbl);
1176  return -EIO;
1177  }
1178 
1179  req->pg_tbl = pg_tbl;
1180  return 0;
1181 }
1182 
1183 static void sbp_calc_data_length_direction(struct sbp_target_request *req,
1184  u32 *data_len, enum dma_data_direction *data_dir)
1185 {
1186  int data_size, direction, idx;
1187 
1188  data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1189  direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
1190 
1191  if (!data_size) {
1192  *data_len = 0;
1193  *data_dir = DMA_NONE;
1194  return;
1195  }
1196 
1197  *data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1198 
1199  if (req->pg_tbl) {
1200  *data_len = 0;
1201  for (idx = 0; idx < data_size; idx++) {
1202  *data_len += be16_to_cpu(
1203  req->pg_tbl[idx].segment_length);
1204  }
1205  } else {
1206  *data_len = data_size;
1207  }
1208 }
1209 
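/*
 * Handle a command ORB: fetch the CDB (and page table, if present),
 * derive the transfer length and direction, and hand the command to the
 * target core; any transport failure is reported as an unspecified error
 * in the status block.
 */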
1210 static void sbp_handle_command(struct sbp_target_request *req)
1211 {
1212  struct sbp_login_descriptor *login = req->login;
1213  struct sbp_session *sess = login->sess;
1214  int ret, unpacked_lun;
1215  u32 data_length;
1216  enum dma_data_direction data_dir;
1217 
1218  ret = sbp_fetch_command(req);
1219  if (ret) {
1220  pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
1221  goto err;
1222  }
1223 
1224  ret = sbp_fetch_page_table(req);
1225  if (ret) {
1226  pr_debug("sbp_handle_command: fetch page table failed: %d\n",
1227  ret);
1228  goto err;
1229  }
1230 
1231  unpacked_lun = req->login->lun->unpacked_lun;
1232  sbp_calc_data_length_direction(req, &data_length, &data_dir);
1233 
1234  pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
1235  req->orb_pointer, unpacked_lun, data_length, data_dir);
1236 
1237  if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
1238  req->sense_buf, unpacked_lun, data_length,
1239  MSG_SIMPLE_TAG, data_dir, 0))
1240  goto err;
1241 
1242  return;
1243 
1244 err:
1245  req->status.status |= cpu_to_be32(
1246  STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1247  STATUS_BLOCK_DEAD(0) |
1248  STATUS_BLOCK_LEN(1) |
1249  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1250  sbp_send_status(req);
1251  sbp_free_request(req);
1252 }
1253 
1254 /*
1255  * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
1256  * DMA_FROM_DEVICE = write to initiator (SCSI READ)
1257  */
1258 static int sbp_rw_data(struct sbp_target_request *req)
1259 {
1260  struct sbp_session *sess = req->login->sess;
1261  int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
1262  generation, num_pte, length, tfr_length,
1263  rcode = RCODE_COMPLETE;
1264  struct sbp_page_table_entry *pte;
1265  unsigned long long offset;
1266  struct fw_card *card;
1267  struct sg_mapping_iter iter;
1268 
1269  if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
1270  tcode = TCODE_WRITE_BLOCK_REQUEST;
1271  sg_miter_flags = SG_MITER_FROM_SG;
1272  } else {
1273  tcode = TCODE_READ_BLOCK_REQUEST;
1274  sg_miter_flags = SG_MITER_TO_SG;
1275  }
1276 
1277  max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
1278  speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
1279 
1280  pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
1281  if (pg_size) {
1282  pr_err("sbp_run_transaction: page size ignored\n");
1283  pg_size = 0x100 << pg_size;
1284  }
1285 
1286  spin_lock_bh(&sess->lock);
1287  card = fw_card_get(sess->card);
1288  node_id = sess->node_id;
1289  generation = sess->generation;
1290  spin_unlock_bh(&sess->lock);
1291 
1292  if (req->pg_tbl) {
1293  pte = req->pg_tbl;
1294  num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1295 
1296  offset = 0;
1297  length = 0;
1298  } else {
1299  pte = NULL;
1300  num_pte = 0;
1301 
1302  offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
1303  length = req->se_cmd.data_length;
1304  }
1305 
1306  sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
1307  sg_miter_flags);
1308 
1309  while (length || num_pte) {
1310  if (!length) {
1311  offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
1312  be32_to_cpu(pte->segment_base_lo);
1313  length = be16_to_cpu(pte->segment_length);
1314 
1315  pte++;
1316  num_pte--;
1317  }
1318 
1319  sg_miter_next(&iter);
1320 
1321  tfr_length = min3(length, max_payload, (int)iter.length);
1322 
1323  /* FIXME: take page_size into account */
1324 
1325  rcode = sbp_run_transaction(card, tcode, node_id,
1326  generation, speed,
1327  offset, iter.addr, tfr_length);
1328 
1329  if (rcode != RCODE_COMPLETE)
1330  break;
1331 
1332  length -= tfr_length;
1333  offset += tfr_length;
1334  iter.consumed = tfr_length;
1335  }
1336 
1337  sg_miter_stop(&iter);
1338  fw_card_put(card);
1339 
1340  if (rcode == RCODE_COMPLETE) {
1341  WARN_ON(length != 0);
1342  return 0;
1343  } else {
1344  return -EIO;
1345  }
1346 }
1347 
1348 static int sbp_send_status(struct sbp_target_request *req)
1349 {
1350  int ret, length;
1351  struct sbp_login_descriptor *login = req->login;
1352 
1353  length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
1354 
1355  ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
1356  login->status_fifo_addr, &req->status, length);
1357  if (ret != RCODE_COMPLETE) {
1358  pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
1359  return -EIO;
1360  }
1361 
1362  pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
1363  req->orb_pointer);
1364 
1365  return 0;
1366 }
1367 
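/*
 * Repack fixed-format SCSI sense data into the condensed sense layout
 * carried in the SBP status block.
 */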
1368 static void sbp_sense_mangle(struct sbp_target_request *req)
1369 {
1370  struct se_cmd *se_cmd = &req->se_cmd;
1371  u8 *sense = req->sense_buf;
1372  u8 *status = req->status.data;
1373 
1374  WARN_ON(se_cmd->scsi_sense_length < 18);
1375 
1376  switch (sense[0] & 0x7f) { /* sfmt */
1377  case 0x70: /* current, fixed */
1378  status[0] = 0 << 6;
1379  break;
1380  case 0x71: /* deferred, fixed */
1381  status[0] = 1 << 6;
1382  break;
1383  case 0x72: /* current, descriptor */
1384  case 0x73: /* deferred, descriptor */
1385  default:
1386  /*
1387  * TODO: SBP-3 specifies what we should do with descriptor
1388  * format sense data
1389  */
1390  pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
1391  sense[0]);
1392  req->status.status |= cpu_to_be32(
1393  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1394  STATUS_BLOCK_DEAD(0) |
1395  STATUS_BLOCK_LEN(1) |
1396  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
1397  return;
1398  }
1399 
1400  status[0] |= se_cmd->scsi_status & 0x3f;/* status */
1401  status[1] =
1402  (sense[0] & 0x80) | /* valid */
1403  ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */
1404  (sense[2] & 0x0f); /* sense_key */
1405  status[2] = se_cmd->scsi_asc; /* sense_code */
1406  status[3] = se_cmd->scsi_ascq; /* sense_qualifier */
1407 
1408  /* information */
1409  status[4] = sense[3];
1410  status[5] = sense[4];
1411  status[6] = sense[5];
1412  status[7] = sense[6];
1413 
1414  /* CDB-dependent */
1415  status[8] = sense[8];
1416  status[9] = sense[9];
1417  status[10] = sense[10];
1418  status[11] = sense[11];
1419 
1420  /* fru */
1421  status[12] = sense[14];
1422 
1423  /* sense_key-dependent */
1424  status[13] = sense[15];
1425  status[14] = sense[16];
1426  status[15] = sense[17];
1427 
1428  req->status.status |= cpu_to_be32(
1429  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1430  STATUS_BLOCK_DEAD(0) |
1431  STATUS_BLOCK_LEN(5) |
1432  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1433 }
1434 
1435 static int sbp_send_sense(struct sbp_target_request *req)
1436 {
1437  struct se_cmd *se_cmd = &req->se_cmd;
1438 
1439  if (se_cmd->scsi_sense_length) {
1440  sbp_sense_mangle(req);
1441  } else {
1442  req->status.status |= cpu_to_be32(
1443  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1444  STATUS_BLOCK_DEAD(0) |
1445  STATUS_BLOCK_LEN(1) |
1446  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1447  }
1448 
1449  return sbp_send_status(req);
1450 }
1451 
1452 static void sbp_free_request(struct sbp_target_request *req)
1453 {
1454  kfree(req->pg_tbl);
1455  kfree(req->cmd_buf);
1456  kfree(req);
1457 }
1458 
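/*
 * Process one management ORB: fetch it from the initiator, dispatch on
 * the requested function (LOGIN, QUERY LOGINS, RECONNECT, LOGOUT, ...),
 * then write the resulting status block to the ORB's status FIFO.
 */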
1459 static void sbp_mgt_agent_process(struct work_struct *work)
1460 {
1461  struct sbp_management_agent *agent =
1462  container_of(work, struct sbp_management_agent, work);
1463  struct sbp_management_request *req = agent->request;
1464  int ret;
1465  int status_data_len = 0;
1466 
1467  /* fetch the ORB from the initiator */
1468  ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
1469  req->node_addr, req->generation, req->speed,
1470  agent->orb_offset, &req->orb, sizeof(req->orb));
1471  if (ret != RCODE_COMPLETE) {
1472  pr_debug("mgt_orb fetch failed: %x\n", ret);
1473  goto out;
1474  }
1475 
1476  pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
1477  sbp2_pointer_to_addr(&req->orb.ptr1),
1478  sbp2_pointer_to_addr(&req->orb.ptr2),
1479  be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
1480  sbp2_pointer_to_addr(&req->orb.status_fifo));
1481 
1482  if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
1483  ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
1484  pr_err("mgt_orb bad request\n");
1485  goto out;
1486  }
1487 
1488  switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
1489  case MANAGEMENT_ORB_FUNCTION_LOGIN:
1490  sbp_management_request_login(agent, req, &status_data_len);
1491  break;
1492 
1493  case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
1494  sbp_management_request_query_logins(agent, req,
1495  &status_data_len);
1496  break;
1497 
1498  case MANAGEMENT_ORB_FUNCTION_RECONNECT:
1499  sbp_management_request_reconnect(agent, req, &status_data_len);
1500  break;
1501 
1502  case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
1503  pr_notice("SET PASSWORD not implemented\n");
1504 
1505  req->status.status = cpu_to_be32(
1506  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1507  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_FUNCTION_REJECTED));
1508 
1509  break;
1510 
1511  case MANAGEMENT_ORB_FUNCTION_LOGOUT:
1512  sbp_management_request_logout(agent, req, &status_data_len);
1513  break;
1514 
1515  case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
1516  pr_notice("ABORT TASK not implemented\n");
1517 
1518  req->status.status = cpu_to_be32(
1519  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1520  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_FUNCTION_REJECTED));
1521 
1522  break;
1523 
1524  case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
1525  pr_notice("ABORT TASK SET not implemented\n");
1526 
1527  req->status.status = cpu_to_be32(
1528  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1529  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_FUNCTION_REJECTED));
1530 
1531  break;
1532 
1533  case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
1534  pr_notice("LOGICAL UNIT RESET not implemented\n");
1535 
1536  req->status.status = cpu_to_be32(
1537  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1538  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_FUNCTION_REJECTED));
1539 
1540  break;
1541 
1542  case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
1543  pr_notice("TARGET RESET not implemented\n");
1544 
1545  req->status.status = cpu_to_be32(
1546  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1547  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_FUNCTION_REJECTED));
1548 
1549  break;
1550 
1551  default:
1552  pr_notice("unknown management function 0x%x\n",
1553  MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
1554 
1555  req->status.status = cpu_to_be32(
1556  STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1557  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_FUNCTION_REJECTED));
1558 
1559  break;
1560  }
1561 
1562  req->status.status |= cpu_to_be32(
1563  STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
1564  STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
1565  STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
1566  req->status.orb_low = cpu_to_be32(agent->orb_offset);
1567 
1568  /* write the status block back to the initiator */
1569  ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
1570  req->node_addr, req->generation, req->speed,
1571  sbp2_pointer_to_addr(&req->orb.status_fifo),
1572  &req->status, 8 + status_data_len);
1573  if (ret != RCODE_COMPLETE) {
1574  pr_debug("mgt_orb status write failed: %x\n", ret);
1575  goto out;
1576  }
1577 
1578 out:
1579  fw_card_put(req->card);
1580  kfree(req);
1581 
1582  spin_lock_bh(&agent->lock);
1583  agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1584  spin_unlock_bh(&agent->lock);
1585 }
1586 
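/*
 * Request handler for the management agent register: a block write of an
 * ORB pointer queues sbp_mgt_agent_process(), a block read returns the
 * last ORB offset. Only one request is accepted at a time.
 */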
1587 static void sbp_mgt_agent_rw(struct fw_card *card,
1588  struct fw_request *request, int tcode, int destination, int source,
1589  int generation, unsigned long long offset, void *data, size_t length,
1590  void *callback_data)
1591 {
1592  struct sbp_management_agent *agent = callback_data;
1593  struct sbp2_pointer *ptr = data;
1594  int rcode = RCODE_ADDRESS_ERROR;
1595 
1596  if (!agent->tport->enable)
1597  goto out;
1598 
1599  if ((offset != agent->handler.offset) || (length != 8))
1600  goto out;
1601 
1602  if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
1603  struct sbp_management_request *req;
1604  int prev_state;
1605 
1606  spin_lock_bh(&agent->lock);
1607  prev_state = agent->state;
1608  agent->state = MANAGEMENT_AGENT_STATE_BUSY;
1609  spin_unlock_bh(&agent->lock);
1610 
1611  if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
1612  pr_notice("ignoring management request while busy\n");
1613  rcode = RCODE_CONFLICT_ERROR;
1614  goto out;
1615  }
1616 
1617  req = kzalloc(sizeof(*req), GFP_ATOMIC);
1618  if (!req) {
1619  rcode = RCODE_CONFLICT_ERROR;
1620  goto out;
1621  }
1622 
1623  req->card = fw_card_get(card);
1624  req->generation = generation;
1625  req->node_addr = source;
1626  req->speed = fw_get_request_speed(request);
1627 
1628  agent->orb_offset = sbp2_pointer_to_addr(ptr);
1629  agent->request = req;
1630 
1631  queue_work(system_unbound_wq, &agent->work);
1632  rcode = RCODE_COMPLETE;
1633  } else if (tcode == TCODE_READ_BLOCK_REQUEST) {
1634  addr_to_sbp2_pointer(agent->orb_offset, ptr);
1635  rcode = RCODE_COMPLETE;
1636  } else {
1637  rcode = RCODE_TYPE_ERROR;
1638  }
1639 
1640 out:
1641  fw_send_response(card, request, rcode);
1642 }
1643 
1644 static struct sbp_management_agent *sbp_management_agent_register(
1645  struct sbp_tport *tport)
1646 {
1647  int ret;
1648  struct sbp_management_agent *agent;
1649 
1650  agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1651  if (!agent)
1652  return ERR_PTR(-ENOMEM);
1653 
1654  spin_lock_init(&agent->lock);
1655  agent->tport = tport;
1656  agent->handler.length = 0x08;
1657  agent->handler.address_callback = sbp_mgt_agent_rw;
1658  agent->handler.callback_data = agent;
1659  agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1660  INIT_WORK(&agent->work, sbp_mgt_agent_process);
1661  agent->orb_offset = 0;
1662  agent->request = NULL;
1663 
1664  ret = fw_core_add_address_handler(&agent->handler,
1665  &sbp_register_region);
1666  if (ret < 0) {
1667  kfree(agent);
1668  return ERR_PTR(ret);
1669  }
1670 
1671  return agent;
1672 }
1673 
1674 static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
1675 {
1676  fw_core_remove_address_handler(&agent->handler);
1677  cancel_work_sync(&agent->work);
1678  kfree(agent);
1679 }
1680 
1681 static int sbp_check_true(struct se_portal_group *se_tpg)
1682 {
1683  return 1;
1684 }
1685 
1686 static int sbp_check_false(struct se_portal_group *se_tpg)
1687 {
1688  return 0;
1689 }
1690 
1691 static char *sbp_get_fabric_name(void)
1692 {
1693  return "sbp";
1694 }
1695 
1696 static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1697 {
1698  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1699  struct sbp_tport *tport = tpg->tport;
1700 
1701  return &tport->tport_name[0];
1702 }
1703 
1704 static u16 sbp_get_tag(struct se_portal_group *se_tpg)
1705 {
1706  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1707  return tpg->tport_tpgt;
1708 }
1709 
1710 static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
1711 {
1712  return 1;
1713 }
1714 
1715 static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
1716 {
1717  struct sbp_nacl *nacl;
1718 
1719  nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
1720  if (!nacl) {
1721  pr_err("Unable to allocate struct sbp_nacl\n");
1722  return NULL;
1723  }
1724 
1725  return &nacl->se_node_acl;
1726 }
1727 
1728 static void sbp_release_fabric_acl(
1729  struct se_portal_group *se_tpg,
1730  struct se_node_acl *se_nacl)
1731 {
1732  struct sbp_nacl *nacl =
1733  container_of(se_nacl, struct sbp_nacl, se_node_acl);
1734  kfree(nacl);
1735 }
1736 
1737 static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
1738 {
1739  return 1;
1740 }
1741 
1742 static void sbp_release_cmd(struct se_cmd *se_cmd)
1743 {
1744  struct sbp_target_request *req = container_of(se_cmd,
1745  struct sbp_target_request, se_cmd);
1746 
1747  sbp_free_request(req);
1748 }
1749 
1750 static int sbp_shutdown_session(struct se_session *se_sess)
1751 {
1752  return 0;
1753 }
1754 
1755 static void sbp_close_session(struct se_session *se_sess)
1756 {
1757  return;
1758 }
1759 
1760 static u32 sbp_sess_get_index(struct se_session *se_sess)
1761 {
1762  return 0;
1763 }
1764 
1765 static int sbp_write_pending(struct se_cmd *se_cmd)
1766 {
1767  struct sbp_target_request *req = container_of(se_cmd,
1768  struct sbp_target_request, se_cmd);
1769  int ret;
1770 
1771  ret = sbp_rw_data(req);
1772  if (ret) {
1773  req->status.status |= cpu_to_be32(
1774  STATUS_BLOCK_RESP(
1775  STATUS_RESP_TRANSPORT_FAILURE) |
1776  STATUS_BLOCK_DEAD(0) |
1777  STATUS_BLOCK_LEN(1) |
1778  STATUS_BLOCK_SBP_STATUS(
1779  SBP_STATUS_UNSPECIFIED_ERROR));
1780  sbp_send_status(req);
1781  return ret;
1782  }
1783 
1784  target_execute_cmd(se_cmd);
1785  return 0;
1786 }
1787 
1788 static int sbp_write_pending_status(struct se_cmd *se_cmd)
1789 {
1790  return 0;
1791 }
1792 
1793 static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
1794 {
1795  return;
1796 }
1797 
1798 static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
1799 {
1800  struct sbp_target_request *req = container_of(se_cmd,
1801  struct sbp_target_request, se_cmd);
1802 
1803  /* only used for printk until we do TMRs */
1804  return (u32)req->orb_pointer;
1805 }
1806 
1807 static int sbp_get_cmd_state(struct se_cmd *se_cmd)
1808 {
1809  return 0;
1810 }
1811 
1812 static int sbp_queue_data_in(struct se_cmd *se_cmd)
1813 {
1814  struct sbp_target_request *req = container_of(se_cmd,
1815  struct sbp_target_request, se_cmd);
1816  int ret;
1817 
1818  ret = sbp_rw_data(req);
1819  if (ret) {
1820  req->status.status |= cpu_to_be32(
1821  STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1822  STATUS_BLOCK_DEAD(0) |
1823  STATUS_BLOCK_LEN(1) |
1824  STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1825  sbp_send_status(req);
1826  return ret;
1827  }
1828 
1829  return sbp_send_sense(req);
1830 }
1831 
1832 /*
1833  * Called after command (no data transfer) or after the write (to device)
1834  * operation is completed
1835  */
1836 static int sbp_queue_status(struct se_cmd *se_cmd)
1837 {
1838  struct sbp_target_request *req = container_of(se_cmd,
1839  struct sbp_target_request, se_cmd);
1840 
1841  return sbp_send_sense(req);
1842 }
1843 
1844 static int sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1845 {
1846  return 0;
1847 }
1848 
1849 static int sbp_check_stop_free(struct se_cmd *se_cmd)
1850 {
1851  struct sbp_target_request *req = container_of(se_cmd,
1852  struct sbp_target_request, se_cmd);
1853 
1854  transport_generic_free_cmd(&req->se_cmd, 0);
1855  return 1;
1856 }
1857 
1858 /*
1859  * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
1860  */
1861 static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
1862 {
1863  /*
1864  * Return an IEEE 1394 SCSI Protocol identifier for loopback operations
1865  * This is defined in section 7.5.1 Table 362 in spc4r17
1866  */
1867  return SCSI_PROTOCOL_SBP;
1868 }
1869 
1870 static u32 sbp_get_pr_transport_id(
1871  struct se_portal_group *se_tpg,
1872  struct se_node_acl *se_nacl,
1873  struct t10_pr_registration *pr_reg,
1874  int *format_code,
1875  unsigned char *buf)
1876 {
1877  int ret;
1878 
1879  /*
1880  * Set PROTOCOL IDENTIFIER to 3h for SBP
1881  */
1882  buf[0] = SCSI_PROTOCOL_SBP;
1883  /*
1884  * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
1885  * over IEEE 1394
1886  */
1887  ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
1888  if (ret < 0)
1889  pr_debug("sbp transport_id: invalid hex string\n");
1890 
1891  /*
1892  * The IEEE 1394 Transport ID is a hardcoded 24-byte length
1893  */
1894  return 24;
1895 }
1896 
1897 static u32 sbp_get_pr_transport_id_len(
1898  struct se_portal_group *se_tpg,
1899  struct se_node_acl *se_nacl,
1900  struct t10_pr_registration *pr_reg,
1901  int *format_code)
1902 {
1903  *format_code = 0;
1904  /*
1905  * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
1906  * over IEEE 1394
1907  *
1908  * The SBP Transport ID is a hardcoded 24-byte length
1909  */
1910  return 24;
1911 }
1912 
1913 /*
1914  * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
1915  * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
1916  */
1917 static char *sbp_parse_pr_out_transport_id(
1918  struct se_portal_group *se_tpg,
1919  const char *buf,
1920  u32 *out_tid_len,
1921  char **port_nexus_ptr)
1922 {
1923  /*
1924  * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID
1925  * for initiator ports using SCSI over SBP Serial SCSI Protocol
1926  *
1927  * The TransportID for an IEEE 1394 initiator port is a fixed size of
1928  * 24 bytes, and IEEE 1394 does not contain an I_T nexus identifier,
1929  * so we return the **port_nexus_ptr set to NULL.
1930  */
1931  *port_nexus_ptr = NULL;
1932  *out_tid_len = 24;
1933 
1934  return (char *)&buf[8];
1935 }
1936 
1937 static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
1938 {
1939  int i, count = 0;
1940 
1941  spin_lock(&tpg->tpg_lun_lock);
1942  for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
1943  struct se_lun *se_lun = tpg->tpg_lun_list[i];
1944 
1945  if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
1946  continue;
1947 
1948  count++;
1949  }
1950  spin_unlock(&tpg->tpg_lun_lock);
1951 
1952  return count;
1953 }
1954 
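/*
 * Rebuild this target's unit directory in the local config ROM: the
 * template entries plus the management agent offset, unit
 * characteristics, reconnect_timeout, one logical_unit_number entry per
 * active LUN, and the EUI-64 leaf. Re-run whenever LUNs are linked or
 * unlinked.
 */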
1955 static int sbp_update_unit_directory(struct sbp_tport *tport)
1956 {
1957  int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
1958  u32 *data;
1959 
1960  if (tport->unit_directory.data) {
1961  fw_core_remove_descriptor(&tport->unit_directory);
1962  kfree(tport->unit_directory.data);
1963  tport->unit_directory.data = NULL;
1964  }
1965 
1966  if (!tport->enable || !tport->tpg)
1967  return 0;
1968 
1969  num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
1970 
1971  /*
1972  * Number of entries in the final unit directory:
1973  * - all of those in the template
1974  * - management_agent
1975  * - unit_characteristics
1976  * - reconnect_timeout
1977  * - unit unique ID
1978  * - one for each LUN
1979  *
1980  * MUST NOT include leaf or sub-directory entries
1981  */
1982  num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
1983 
1984  if (tport->directory_id != -1)
1985  num_entries++;
1986 
1987  /* allocate num_entries + 4 for the header and unique ID leaf */
1988  data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
1989  if (!data)
1990  return -ENOMEM;
1991 
1992  /* directory_length */
1993  data[idx++] = num_entries << 16;
1994 
1995  /* directory_id */
1996  if (tport->directory_id != -1)
1997  data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
1998 
1999  /* unit directory template */
2000  memcpy(&data[idx], sbp_unit_directory_template,
2001  sizeof(sbp_unit_directory_template));
2002  idx += ARRAY_SIZE(sbp_unit_directory_template);
2003 
2004  /* management_agent */
2005  mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
2006  data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
2007 
2008  /* unit_characteristics */
2009  data[idx++] = 0x3a000000 |
2010  (((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
2011  SBP_ORB_FETCH_SIZE;
2012 
2013  /* reconnect_timeout */
2014  data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
2015 
2016  /* unit unique ID (leaf is just after LUNs) */
2017  data[idx++] = 0x8d000000 | (num_luns + 1);
2018 
2019  spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
2020  for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
2021  struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
2022  struct se_device *dev;
2023  int type;
2024 
2025  if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
2026  continue;
2027 
2028  spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
2029 
2030  dev = se_lun->lun_se_dev;
2031  type = dev->transport->get_device_type(dev);
2032 
2033  /* logical_unit_number */
2034  data[idx++] = 0x14000000 |
2035  ((type << 16) & 0x1f0000) |
2036  (se_lun->unpacked_lun & 0xffff);
2037 
2038  spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
2039  }
2040  spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
2041 
2042  /* unit unique ID leaf */
2043  data[idx++] = 2 << 16;
2044  data[idx++] = tport->guid >> 32;
2045  data[idx++] = tport->guid;
2046 
2047  tport->unit_directory.length = idx;
2048  tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
2049  tport->unit_directory.data = data;
2050 
2051  ret = fw_core_add_descriptor(&tport->unit_directory);
2052  if (ret < 0) {
2053  kfree(tport->unit_directory.data);
2054  tport->unit_directory.data = NULL;
2055  }
2056 
2057  return ret;
2058 }
2059 
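/*
 * Parse a WWN given as exactly 16 hex digits (an EUI-64) into a u64;
 * returns the number of characters consumed, or -1 on malformed input.
 */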
2060 static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
2061 {
2062  const char *cp;
2063  char c, nibble;
2064  int pos = 0, err;
2065 
2066  *wwn = 0;
2067  for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
2068  c = *cp;
2069  if (c == '\n' && cp[1] == '\0')
2070  continue;
2071  if (c == '\0') {
2072  err = 2;
2073  if (pos != 16)
2074  goto fail;
2075  return cp - name;
2076  }
2077  err = 3;
2078  if (isdigit(c))
2079  nibble = c - '0';
2080  else if (isxdigit(c))
2081  nibble = tolower(c) - 'a' + 10;
2082  else
2083  goto fail;
2084  *wwn = (*wwn << 4) | nibble;
2085  pos++;
2086  }
2087  err = 4;
2088 fail:
2089  printk(KERN_INFO "err %u len %zu pos %u\n",
2090  err, cp - name, pos);
2091  return -1;
2092 }
2093 
2094 static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
2095 {
2096  return snprintf(buf, len, "%016llx", wwn);
2097 }
2098 
2099 static struct se_node_acl *sbp_make_nodeacl(
2100  struct se_portal_group *se_tpg,
2101  struct config_group *group,
2102  const char *name)
2103 {
2104  struct se_node_acl *se_nacl, *se_nacl_new;
2105  struct sbp_nacl *nacl;
2106  u64 guid = 0;
2107  u32 nexus_depth = 1;
2108 
2109  if (sbp_parse_wwn(name, &guid) < 0)
2110  return ERR_PTR(-EINVAL);
2111 
2112  se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
2113  if (!se_nacl_new)
2114  return ERR_PTR(-ENOMEM);
2115 
2116  /*
2117  * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
2118  * when converting a NodeACL from demo mode -> explict
2119  */
2120  se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
2121  name, nexus_depth);
2122  if (IS_ERR(se_nacl)) {
2123  sbp_release_fabric_acl(se_tpg, se_nacl_new);
2124  return se_nacl;
2125  }
2126 
2127  nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);
2128  nacl->guid = guid;
2129  sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);
2130 
2131  return se_nacl;
2132 }
2133 
2134 static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
2135 {
2136  struct sbp_nacl *nacl =
2137  container_of(se_acl, struct sbp_nacl, se_node_acl);
2138 
2139  core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
2140  kfree(nacl);
2141 }
2142 
2143 static int sbp_post_link_lun(
2144  struct se_portal_group *se_tpg,
2145  struct se_lun *se_lun)
2146 {
2147  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2148 
2149  return sbp_update_unit_directory(tpg->tport);
2150 }
2151 
2152 static void sbp_pre_unlink_lun(
2153  struct se_portal_group *se_tpg,
2154  struct se_lun *se_lun)
2155 {
2156  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2157  struct sbp_tport *tport = tpg->tport;
2158  int ret;
2159 
2160  if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
2161  tport->enable = 0;
2162 
2163  ret = sbp_update_unit_directory(tport);
2164  if (ret < 0)
2165  pr_err("unlink LUN: failed to update unit directory\n");
2166 }
2167 
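/*
 * Called by the target core when userspace creates a "tpgt_<n>" group
 * under this fabric's configfs WWN directory (typically
 * /sys/kernel/config/target/sbp/<wwn>/tpgt_<n>; path shown only for
 * illustration). Only a single TPG per unit is supported.
 */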
2168 static struct se_portal_group *sbp_make_tpg(
2169  struct se_wwn *wwn,
2170  struct config_group *group,
2171  const char *name)
2172 {
2173  struct sbp_tport *tport =
2174  container_of(wwn, struct sbp_tport, tport_wwn);
2175 
2176  struct sbp_tpg *tpg;
2177  unsigned long tpgt;
2178  int ret;
2179 
2180  if (strstr(name, "tpgt_") != name)
2181  return ERR_PTR(-EINVAL);
2182  if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
2183  return ERR_PTR(-EINVAL);
2184 
2185  if (tport->tpg) {
2186  pr_err("Only one TPG per Unit is possible.\n");
2187  return ERR_PTR(-EBUSY);
2188  }
2189 
2190  tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2191  if (!tpg) {
2192  pr_err("Unable to allocate struct sbp_tpg\n");
2193  return ERR_PTR(-ENOMEM);
2194  }
2195 
2196  tpg->tport = tport;
2197  tpg->tport_tpgt = tpgt;
2198  tport->tpg = tpg;
2199 
2200  /* default attribute values */
2201  tport->enable = 0;
2202  tport->directory_id = -1;
2203  tport->mgt_orb_timeout = 15;
2204  tport->max_reconnect_timeout = 5;
2205  tport->max_logins_per_lun = 1;
2206 
2207  tport->mgt_agt = sbp_management_agent_register(tport);
2208  if (IS_ERR(tport->mgt_agt)) {
2209  ret = PTR_ERR(tport->mgt_agt);
2210  kfree(tpg);
2211  return ERR_PTR(ret);
2212  }
2213 
2214  ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
2215  &tpg->se_tpg, (void *)tpg,
 2216  TRANSPORT_TPG_TYPE_NORMAL);
 2217  if (ret < 0) {
2218  sbp_management_agent_unregister(tport->mgt_agt);
2219  kfree(tpg);
2220  return ERR_PTR(ret);
2221  }
2222 
2223  return &tpg->se_tpg;
2224 }
2225 
2226 static void sbp_drop_tpg(struct se_portal_group *se_tpg)
2227 {
2228  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2229  struct sbp_tport *tport = tpg->tport;
2230 
2231  core_tpg_deregister(se_tpg);
2232  sbp_management_agent_unregister(tport->mgt_agt);
2233  tport->tpg = NULL;
2234  kfree(tpg);
2235 }
2236 
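/*
 * Called by the target core when userspace creates the fabric WWN
 * directory. The directory name must parse as a 16-hex-digit EUI-64;
 * it is stored as the tport GUID and later advertised in the unit
 * unique ID leaf of the unit directory.
 */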
2237 static struct se_wwn *sbp_make_tport(
2238  struct target_fabric_configfs *tf,
2239  struct config_group *group,
2240  const char *name)
2241 {
2242  struct sbp_tport *tport;
2243  u64 guid = 0;
2244 
2245  if (sbp_parse_wwn(name, &guid) < 0)
2246  return ERR_PTR(-EINVAL);
2247 
2248  tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2249  if (!tport) {
2250  pr_err("Unable to allocate struct sbp_tport\n");
2251  return ERR_PTR(-ENOMEM);
2252  }
2253 
2254  tport->guid = guid;
2255  sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2256 
2257  return &tport->tport_wwn;
2258 }
2259 
2260 static void sbp_drop_tport(struct se_wwn *wwn)
2261 {
2262  struct sbp_tport *tport =
2263  container_of(wwn, struct sbp_tport, tport_wwn);
2264 
2265  kfree(tport);
2266 }
2267 
2268 static ssize_t sbp_wwn_show_attr_version(
2269  struct target_fabric_configfs *tf,
2270  char *page)
2271 {
2272  return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
2273 }
2274 
2275 TF_WWN_ATTR_RO(sbp, version);
2276 
2277 static struct configfs_attribute *sbp_wwn_attrs[] = {
2278  &sbp_wwn_version.attr,
2279  NULL,
2280 };
2281 
2282 static ssize_t sbp_tpg_show_directory_id(
2283  struct se_portal_group *se_tpg,
2284  char *page)
2285 {
2286  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2287  struct sbp_tport *tport = tpg->tport;
2288 
2289  if (tport->directory_id == -1)
2290  return sprintf(page, "implicit\n");
2291  else
2292  return sprintf(page, "%06x\n", tport->directory_id);
2293 }
2294 
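/*
 * The directory_id attribute accepts either the literal string "implicit"
 * or a hexadecimal value no larger than 0xffffff, and may only be changed
 * while the target is disabled. Illustrative usage (configfs path assumed,
 * not taken from this file):
 *   echo implicit > /sys/kernel/config/target/sbp/<wwn>/tpgt_1/directory_id
 */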
2295 static ssize_t sbp_tpg_store_directory_id(
2296  struct se_portal_group *se_tpg,
2297  const char *page,
2298  size_t count)
2299 {
2300  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2301  struct sbp_tport *tport = tpg->tport;
2302  unsigned long val;
2303 
2304  if (tport->enable) {
2305  pr_err("Cannot change the directory_id on an active target.\n");
2306  return -EBUSY;
2307  }
2308 
2309  if (strstr(page, "implicit") == page) {
2310  tport->directory_id = -1;
2311  } else {
2312  if (kstrtoul(page, 16, &val) < 0)
2313  return -EINVAL;
2314  if (val > 0xffffff)
2315  return -EINVAL;
2316 
2317  tport->directory_id = val;
2318  }
2319 
2320  return count;
2321 }
2322 
2323 static ssize_t sbp_tpg_show_enable(
2324  struct se_portal_group *se_tpg,
2325  char *page)
2326 {
2327  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2328  struct sbp_tport *tport = tpg->tport;
2329  return sprintf(page, "%d\n", tport->enable);
2330 }
2331 
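/*
 * Enabling the target requires at least one LUN and publishes the unit
 * directory in the local Config ROM; disabling is refused with -EBUSY
 * while sessions remain open. Illustrative usage (configfs path assumed,
 * not taken from this file):
 *   echo 1 > /sys/kernel/config/target/sbp/<wwn>/tpgt_1/enable
 */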
2332 static ssize_t sbp_tpg_store_enable(
2333  struct se_portal_group *se_tpg,
2334  const char *page,
2335  size_t count)
2336 {
2337  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2338  struct sbp_tport *tport = tpg->tport;
2339  unsigned long val;
2340  int ret;
2341 
2342  if (kstrtoul(page, 0, &val) < 0)
2343  return -EINVAL;
2344  if ((val != 0) && (val != 1))
2345  return -EINVAL;
2346 
2347  if (tport->enable == val)
2348  return count;
2349 
2350  if (val) {
2351  if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
2352  pr_err("Cannot enable a target with no LUNs!\n");
2353  return -EINVAL;
2354  }
2355  } else {
2356  /* XXX: force-shutdown sessions instead? */
2357  spin_lock_bh(&se_tpg->session_lock);
2358  if (!list_empty(&se_tpg->tpg_sess_list)) {
2359  spin_unlock_bh(&se_tpg->session_lock);
2360  return -EBUSY;
2361  }
2362  spin_unlock_bh(&se_tpg->session_lock);
2363  }
2364 
2365  tport->enable = val;
2366 
2367  ret = sbp_update_unit_directory(tport);
2368  if (ret < 0) {
2369  pr_err("Could not update Config ROM\n");
2370  return ret;
2371  }
2372 
2373  return count;
2374 }
2375 
2376 TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
2377 TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);
2378 
2379 static struct configfs_attribute *sbp_tpg_base_attrs[] = {
2380  &sbp_tpg_directory_id.attr,
2381  &sbp_tpg_enable.attr,
2382  NULL,
2383 };
2384 
2385 static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
2386  struct se_portal_group *se_tpg,
2387  char *page)
2388 {
2389  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2390  struct sbp_tport *tport = tpg->tport;
2391  return sprintf(page, "%d\n", tport->mgt_orb_timeout);
2392 }
2393 
2394 static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
2395  struct se_portal_group *se_tpg,
2396  const char *page,
2397  size_t count)
2398 {
2399  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2400  struct sbp_tport *tport = tpg->tport;
2401  unsigned long val;
2402  int ret;
2403 
2404  if (kstrtoul(page, 0, &val) < 0)
2405  return -EINVAL;
2406  if ((val < 1) || (val > 127))
2407  return -EINVAL;
2408 
2409  if (tport->mgt_orb_timeout == val)
2410  return count;
2411 
2412  tport->mgt_orb_timeout = val;
2413 
2414  ret = sbp_update_unit_directory(tport);
2415  if (ret < 0)
2416  return ret;
2417 
2418  return count;
2419 }
2420 
2421 static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
2422  struct se_portal_group *se_tpg,
2423  char *page)
2424 {
2425  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2426  struct sbp_tport *tport = tpg->tport;
2427  return sprintf(page, "%d\n", tport->max_reconnect_timeout);
2428 }
2429 
2430 static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
2431  struct se_portal_group *se_tpg,
2432  const char *page,
2433  size_t count)
2434 {
2435  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2436  struct sbp_tport *tport = tpg->tport;
2437  unsigned long val;
2438  int ret;
2439 
2440  if (kstrtoul(page, 0, &val) < 0)
2441  return -EINVAL;
2442  if ((val < 1) || (val > 32767))
2443  return -EINVAL;
2444 
2445  if (tport->max_reconnect_timeout == val)
2446  return count;
2447 
2448  tport->max_reconnect_timeout = val;
2449 
2450  ret = sbp_update_unit_directory(tport);
2451  if (ret < 0)
2452  return ret;
2453 
2454  return count;
2455 }
2456 
2457 static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
2458  struct se_portal_group *se_tpg,
2459  char *page)
2460 {
2461  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2462  struct sbp_tport *tport = tpg->tport;
2463  return sprintf(page, "%d\n", tport->max_logins_per_lun);
2464 }
2465 
2466 static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
2467  struct se_portal_group *se_tpg,
2468  const char *page,
2469  size_t count)
2470 {
2471  struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2472  struct sbp_tport *tport = tpg->tport;
2473  unsigned long val;
2474 
2475  if (kstrtoul(page, 0, &val) < 0)
2476  return -EINVAL;
2477  if ((val < 1) || (val > 127))
2478  return -EINVAL;
2479 
2480  /* XXX: also check against current count? */
2481 
2482  tport->max_logins_per_lun = val;
2483 
2484  return count;
2485 }
2486 
2487 TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
2488 TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
2489 TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);
2490 
2491 static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2492  &sbp_tpg_attrib_mgt_orb_timeout.attr,
2493  &sbp_tpg_attrib_max_reconnect_timeout.attr,
2494  &sbp_tpg_attrib_max_logins_per_lun.attr,
2495  NULL,
2496 };
2497 
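/*
 * Fabric callbacks handed to the target core. The network portal hooks
 * (fabric_make_np/fabric_drop_np) are left NULL since they are unused by
 * this fabric; LUN link/unlink updates the FireWire unit directory via
 * sbp_post_link_lun()/sbp_pre_unlink_lun().
 */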
2498 static struct target_core_fabric_ops sbp_ops = {
2499  .get_fabric_name = sbp_get_fabric_name,
2500  .get_fabric_proto_ident = sbp_get_fabric_proto_ident,
2501  .tpg_get_wwn = sbp_get_fabric_wwn,
2502  .tpg_get_tag = sbp_get_tag,
2503  .tpg_get_default_depth = sbp_get_default_depth,
2504  .tpg_get_pr_transport_id = sbp_get_pr_transport_id,
2505  .tpg_get_pr_transport_id_len = sbp_get_pr_transport_id_len,
2506  .tpg_parse_pr_out_transport_id = sbp_parse_pr_out_transport_id,
2507  .tpg_check_demo_mode = sbp_check_true,
2508  .tpg_check_demo_mode_cache = sbp_check_true,
2509  .tpg_check_demo_mode_write_protect = sbp_check_false,
2510  .tpg_check_prod_mode_write_protect = sbp_check_false,
2511  .tpg_alloc_fabric_acl = sbp_alloc_fabric_acl,
2512  .tpg_release_fabric_acl = sbp_release_fabric_acl,
2513  .tpg_get_inst_index = sbp_tpg_get_inst_index,
2514  .release_cmd = sbp_release_cmd,
2515  .shutdown_session = sbp_shutdown_session,
2516  .close_session = sbp_close_session,
2517  .sess_get_index = sbp_sess_get_index,
2518  .write_pending = sbp_write_pending,
2519  .write_pending_status = sbp_write_pending_status,
2520  .set_default_node_attributes = sbp_set_default_node_attrs,
2521  .get_task_tag = sbp_get_task_tag,
2522  .get_cmd_state = sbp_get_cmd_state,
2523  .queue_data_in = sbp_queue_data_in,
2524  .queue_status = sbp_queue_status,
2525  .queue_tm_rsp = sbp_queue_tm_rsp,
2526  .check_stop_free = sbp_check_stop_free,
2527 
2528  .fabric_make_wwn = sbp_make_tport,
2529  .fabric_drop_wwn = sbp_drop_tport,
2530  .fabric_make_tpg = sbp_make_tpg,
2531  .fabric_drop_tpg = sbp_drop_tpg,
2532  .fabric_post_link = sbp_post_link_lun,
2533  .fabric_pre_unlink = sbp_pre_unlink_lun,
2534  .fabric_make_np = NULL,
2535  .fabric_drop_np = NULL,
2536  .fabric_make_nodeacl = sbp_make_nodeacl,
2537  .fabric_drop_nodeacl = sbp_drop_nodeacl,
2538 };
2539 
2540 static int sbp_register_configfs(void)
2541 {
2542  struct target_fabric_configfs *fabric;
2543  int ret;
2544 
2545  fabric = target_fabric_configfs_init(THIS_MODULE, "sbp");
2546  if (IS_ERR(fabric)) {
2547  pr_err("target_fabric_configfs_init() failed\n");
2548  return PTR_ERR(fabric);
2549  }
2550 
2551  fabric->tf_ops = sbp_ops;
2552 
2553  /*
2554  * Setup default attribute lists for various fabric->tf_cit_tmpl
2555  */
2556  TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
2557  TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
2558  TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
2559  TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
2560  TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
2561  TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2562  TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2563  TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2564  TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2565 
2566  ret = target_fabric_configfs_register(fabric);
2567  if (ret < 0) {
2568  pr_err("target_fabric_configfs_register() failed for SBP\n");
2569  return ret;
2570  }
2571 
2572  sbp_fabric_configfs = fabric;
2573 
2574  return 0;
2575 }
2576 
2577 static void sbp_deregister_configfs(void)
2578 {
2579  if (!sbp_fabric_configfs)
2580  return;
2581 
2582  target_fabric_configfs_deregister(sbp_fabric_configfs);
2583  sbp_fabric_configfs = NULL;
2584 }
2585 
2586 static int __init sbp_init(void)
2587 {
2588  int ret;
2589 
2590  ret = sbp_register_configfs();
2591  if (ret < 0)
2592  return ret;
2593 
2594  return 0;
2595 }
2596 
2597 static void sbp_exit(void)
2598 {
2599  sbp_deregister_configfs();
2600 }
2601 
2602 MODULE_DESCRIPTION("FireWire SBP fabric driver");
2603 MODULE_LICENSE("GPL");
2604 module_init(sbp_init);
2605 module_exit(sbp_exit);
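/*
 * Rough configfs setup sketch (illustrative only; exact paths, the
 * backstore layout and any userspace tooling are assumptions, not part
 * of this file):
 *
 *   mkdir /sys/kernel/config/target/sbp/<16-hex-digit EUI-64>
 *   mkdir /sys/kernel/config/target/sbp/<wwn>/tpgt_1
 *   mkdir /sys/kernel/config/target/sbp/<wwn>/tpgt_1/lun/lun_0
 *   ln -s /sys/kernel/config/target/core/<backstore>/<dev> \
 *         /sys/kernel/config/target/sbp/<wwn>/tpgt_1/lun/lun_0
 *   echo 1 > /sys/kernel/config/target/sbp/<wwn>/tpgt_1/enable
 */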