Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
zfcp_fsf.c
Go to the documentation of this file.
1 /*
2  * zfcp device driver
3  *
4  * Implementation of FSF commands.
5  *
6  * Copyright IBM Corp. 2002, 2010
7  */
8 
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 
12 #include <linux/blktrace_api.h>
13 #include <linux/slab.h>
14 #include <scsi/fc/fc_els.h>
15 #include "zfcp_ext.h"
16 #include "zfcp_fc.h"
17 #include "zfcp_dbf.h"
18 #include "zfcp_qdio.h"
19 #include "zfcp_reqlist.h"
20 
22 
23 static void zfcp_fsf_request_timeout_handler(unsigned long data)
24 {
25  struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
26  zfcp_qdio_siosl(adapter);
28  "fsrth_1");
29 }
30 
31 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
32  unsigned long timeout)
33 {
34  fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
35  fsf_req->timer.data = (unsigned long) fsf_req->adapter;
36  fsf_req->timer.expires = jiffies + timeout;
37  add_timer(&fsf_req->timer);
38 }
39 
40 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
41 {
42  BUG_ON(!fsf_req->erp_action);
43  fsf_req->timer.function = zfcp_erp_timeout_handler;
44  fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
45  fsf_req->timer.expires = jiffies + 30 * HZ;
46  add_timer(&fsf_req->timer);
47 }
48 
49 /* association between FSF command and FSF QTCB type */
/* NOTE(review): the table entries (doxygen lines 51-63) were lost in
 * extraction.  Indexed by FSF command code in zfcp_fsf_req_create() to
 * pick the QTCB type — restore the initializers from the upstream file. */
50 static u32 fsf_qtcb_type[] = {
64 };
65 
66 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
67 {
68  dev_err(&req->adapter->ccw_device->dev, "FCP device not "
69  "operational because of an unsupported FC class\n");
70  zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
72 }
73 
79 {
80  if (likely(req->pool)) {
81  if (likely(req->qtcb))
82  mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
83  mempool_free(req, req->pool);
84  return;
85  }
86 
87  if (likely(req->qtcb))
88  kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
89  kfree(req);
90 }
91 
92 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
93 {
94  unsigned long flags;
95  struct fsf_status_read_buffer *sr_buf = req->data;
96  struct zfcp_adapter *adapter = req->adapter;
97  struct zfcp_port *port;
98  int d_id = ntoh24(sr_buf->d_id);
99 
100  read_lock_irqsave(&adapter->port_list_lock, flags);
101  list_for_each_entry(port, &adapter->port_list, list)
102  if (port->d_id == d_id) {
103  zfcp_erp_port_reopen(port, 0, "fssrpc1");
104  break;
105  }
106  read_unlock_irqrestore(&adapter->port_list_lock, flags);
107 }
108 
/*
 * Evaluate link-down information from the channel and emit a specific
 * warning per error code; a NULL info pointer falls through to "out".
 * NOTE(review): extraction dropped the second parameter line (doxygen 110,
 * presumably "struct fsf_link_down_info *link_down"), the early-return
 * guard body (113-119), most "case FSF_PSQ_LINK_*:" labels, and the
 * statement at the "out:" label (189) — restore from upstream.
 */
109 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
111 {
112  struct zfcp_adapter *adapter = req->adapter;
113 
115  return;
116 
118 
120 
121  if (!link_down)
122  goto out;
123 
124  switch (link_down->error_code) {
126  dev_warn(&req->adapter->ccw_device->dev,
127  "There is no light signal from the local "
128  "fibre channel cable\n");
129  break;
131  dev_warn(&req->adapter->ccw_device->dev,
132  "There is a wrap plug instead of a fibre "
133  "channel cable\n");
134  break;
135  case FSF_PSQ_LINK_NO_FCP:
136  dev_warn(&req->adapter->ccw_device->dev,
137  "The adjacent fibre channel node does not "
138  "support FCP\n");
139  break;
141  dev_warn(&req->adapter->ccw_device->dev,
142  "The FCP device is suspended because of a "
143  "firmware update\n");
144  break;
146  dev_warn(&req->adapter->ccw_device->dev,
147  "The FCP device detected a WWPN that is "
148  "duplicate or not valid\n");
149  break;
151  dev_warn(&req->adapter->ccw_device->dev,
152  "The fibre channel fabric does not support NPIV\n");
153  break;
155  dev_warn(&req->adapter->ccw_device->dev,
156  "The FCP adapter cannot support more NPIV ports\n");
157  break;
159  dev_warn(&req->adapter->ccw_device->dev,
160  "The adjacent switch cannot support "
161  "more NPIV ports\n");
162  break;
164  dev_warn(&req->adapter->ccw_device->dev,
165  "The FCP adapter could not log in to the "
166  "fibre channel fabric\n");
167  break;
169  dev_warn(&req->adapter->ccw_device->dev,
170  "The WWPN assignment file on the FCP adapter "
171  "has been damaged\n");
172  break;
174  dev_warn(&req->adapter->ccw_device->dev,
175  "The mode table on the FCP adapter "
176  "has been damaged\n");
177  break;
179  dev_warn(&req->adapter->ccw_device->dev,
180  "All NPIV ports on the FCP adapter have "
181  "been assigned\n");
182  break;
183  default:
184  dev_warn(&req->adapter->ccw_device->dev,
185  "The link between the FCP adapter and "
186  "the FC fabric is down\n");
187  }
188 out:
190 }
191 
/*
 * Dispatch an unsolicited link-down status buffer: pass the payload's
 * link-down info to the evaluator, or NULL for the subtype whose buffer
 * carries no info.  NOTE(review): the "case FSF_STATUS_READ_SUB_*:"
 * labels (doxygen 199, 202, 205) were lost in extraction.
 */
192 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
193 {
194  struct fsf_status_read_buffer *sr_buf = req->data;
195  struct fsf_link_down_info *ldi =
196  (struct fsf_link_down_info *) &sr_buf->payload;
197 
198  switch (sr_buf->status_subtype) {
200  zfcp_fsf_link_down_info_eval(req, ldi);
201  break;
203  zfcp_fsf_link_down_info_eval(req, ldi);
204  break;
206  zfcp_fsf_link_down_info_eval(req, NULL);
207  };
208 }
209 
/*
 * Completion handler for unsolicited status read requests: dispatch on
 * the buffer's status_type, then free the buffer page and the request
 * and schedule replenishment of the status-read queue (stat_work).
 * NOTE(review): extraction dropped the dismissed-request guard condition
 * (doxygen 215) and the "case FSF_STATUS_READ_*:" labels — restore from
 * upstream before compiling.
 */
210 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
211 {
212  struct zfcp_adapter *adapter = req->adapter;
213  struct fsf_status_read_buffer *sr_buf = req->data;
214 
216  zfcp_dbf_hba_fsf_uss("fssrh_1", req);
217  mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
218  zfcp_fsf_req_free(req);
219  return;
220  }
221 
222  zfcp_dbf_hba_fsf_uss("fssrh_4", req);
223 
224  switch (sr_buf->status_type) {
226  zfcp_fsf_status_read_port_closed(req);
227  break;
230  break;
232  break;
234  dev_warn(&adapter->ccw_device->dev,
235  "The error threshold for checksum statistics "
236  "has been exceeded\n");
237  zfcp_dbf_hba_bit_err("fssrh_3", req);
238  break;
240  zfcp_fsf_status_read_link_down(req);
242  break;
244  dev_info(&adapter->ccw_device->dev,
245  "The local link has been restored\n");
246  /* All ports should be marked as ready to run again */
249  zfcp_erp_adapter_reopen(adapter,
252  "fssrh_2");
254 
255  break;
261  break;
264  break;
266  adapter->adapter_features = sr_buf->payload.word[0];
267  break;
268  }
269 
270  mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
271  zfcp_fsf_req_free(req);
272 
273  atomic_inc(&adapter->stat_miss);
274  queue_work(adapter->work_queue, &adapter->stat_work);
275 }
276 
/*
 * Evaluate the first FSF status qualifier word; only FSF_SQ_NO_RECOM is
 * fatal (SIOSL log collection plus adapter shutdown).  All non-returning
 * branches mark the request failed after the switch.
 * NOTE(review): the "case FSF_SQ_*:" labels before doxygen 284/286 and
 * the trailing status assignment (296) were lost in extraction.
 */
277 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
278 {
279  switch (req->qtcb->header.fsf_status_qual.word[0]) {
284  return;
286  break;
287  case FSF_SQ_NO_RECOM:
288  dev_err(&req->adapter->ccw_device->dev,
289  "The FCP adapter reported a problem "
290  "that cannot be recovered\n");
291  zfcp_qdio_siosl(req->adapter);
292  zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
293  break;
294  }
295  /* all non-return stats set FSFREQ_ERROR*/
297 }
298 
/*
 * Evaluate the QTCB header's fsf_status: an unknown command shuts the
 * adapter down; the qualifier-evaluation path delegates to
 * zfcp_fsf_fsfstatus_qual_eval().  NOTE(review): the early-return guard
 * condition (doxygen 301), a status assignment (310), and one case label
 * (312, presumably FSF_ADAPTER_STATUS_AVAILABLE) were lost in extraction.
 */
299 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
300 {
302  return;
303 
304  switch (req->qtcb->header.fsf_status) {
305  case FSF_UNKNOWN_COMMAND:
306  dev_err(&req->adapter->ccw_device->dev,
307  "The FCP adapter does not recognize the command 0x%x\n",
308  req->qtcb->header.fsf_command);
309  zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
311  break;
313  zfcp_fsf_fsfstatus_qual_eval(req);
314  break;
315  }
316 }
317 
/*
 * Evaluate the QTCB prefix's transfer protocol status after tracing the
 * response.  Most error statuses trigger adapter recovery (reopen or
 * shutdown); FSF_PROT_GOOD returns without flagging an error.
 * NOTE(review): extraction dropped several "case FSF_PROT_*:" labels,
 * the dismissed-request guard (doxygen 326-327), and status-flag
 * assignments (333, 342-343, 345, 352-353, 383) — restore from upstream.
 */
318 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
319 {
320  struct zfcp_adapter *adapter = req->adapter;
321  struct fsf_qtcb *qtcb = req->qtcb;
322  union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
323 
324  zfcp_dbf_hba_fsf_response(req);
325 
328  return;
329  }
330 
331  switch (qtcb->prefix.prot_status) {
332  case FSF_PROT_GOOD:
334  return;
336  dev_err(&adapter->ccw_device->dev,
337  "QTCB version 0x%x not supported by FCP adapter "
338  "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
339  psq->word[0], psq->word[1]);
340  zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
341  break;
344  zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
346  break;
348  dev_err(&adapter->ccw_device->dev,
349  "The QTCB type is not supported by the FCP adapter\n");
350  zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
351  break;
354  &adapter->status);
355  break;
357  dev_err(&adapter->ccw_device->dev,
358  "0x%Lx is an ambiguous request identifier\n",
359  (unsigned long long)qtcb->bottom.support.req_handle);
360  zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
361  break;
362  case FSF_PROT_LINK_DOWN:
363  zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
364  /* go through reopen to flush pending requests */
365  zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
366  break;
368  /* All ports should be marked as ready to run again */
371  zfcp_erp_adapter_reopen(adapter,
374  "fspse_8");
375  break;
376  default:
377  dev_err(&adapter->ccw_device->dev,
378  "0x%x is not a valid transfer protocol status\n",
379  qtcb->prefix.prot_status);
380  zfcp_qdio_siosl(adapter);
381  zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
382  }
384 }
385 
/*
 * Complete an FSF request: unsolicited status reads are handled (and
 * freed) by their own path; all other requests get their timer stopped,
 * protocol and FSF status evaluated, the per-request handler invoked,
 * and any attached ERP action notified.  The final branch either frees
 * the request or signals a synchronous waiter via its completion.
 * NOTE(review): the guard conditions at doxygen 397 and 410 (presumably
 * testing for unsolicited status and ZFCP_STATUS_FSFREQ_CLEANUP) were
 * lost in extraction.
 */
395 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
396 {
398  zfcp_fsf_status_read_handler(req);
399  return;
400  }
401 
402  del_timer(&req->timer);
403  zfcp_fsf_protstatus_eval(req);
404  zfcp_fsf_fsfstatus_eval(req);
405  req->handler(req);
406 
407  if (req->erp_action)
408  zfcp_erp_notify(req->erp_action, 0);
409 
411  zfcp_fsf_req_free(req);
412  else
413  complete(&req->completion);
414 }
415 
/*
 * Dismiss all outstanding FSF requests: atomically move the adapter's
 * request list onto a private queue, then complete each request.
 * NOTE(review): the kernel-doc and signature (doxygen 416-425, presumably
 * "void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)") and the
 * per-request status flagging at 435 were lost in extraction.
 */
426 {
427  struct zfcp_fsf_req *req, *tmp;
428  LIST_HEAD(remove_queue);
429 
431  zfcp_reqlist_move(adapter->req_list, &remove_queue);
432 
433  list_for_each_entry_safe(req, tmp, &remove_queue, list) {
434  list_del(&req->list);
436  zfcp_fsf_req_complete(req);
437  }
438 }
439 
/* FSF-reported port speed capability bits; translated to the SCSI FC
 * transport's FC_PORTSPEED_* values by zfcp_fsf_convert_portspeed(). */
440 #define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
441 #define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
442 #define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
443 #define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
444 #define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
445 #define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
446 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
447 
448 static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
449 {
450  u32 fdmi_speed = 0;
451  if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
452  fdmi_speed |= FC_PORTSPEED_1GBIT;
453  if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
454  fdmi_speed |= FC_PORTSPEED_2GBIT;
455  if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
456  fdmi_speed |= FC_PORTSPEED_4GBIT;
457  if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
458  fdmi_speed |= FC_PORTSPEED_10GBIT;
459  if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
460  fdmi_speed |= FC_PORTSPEED_8GBIT;
461  if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
462  fdmi_speed |= FC_PORTSPEED_16GBIT;
463  if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
464  fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
465  return fdmi_speed;
466 }
467 
/*
 * Evaluate an exchange-config-data response: copy the bottom block to a
 * caller-supplied buffer (req->data), publish WWPN/WWNN/port-id/speed to
 * the SCSI host, cache adapter parameters, and derive the FC topology.
 * Returns 0 on success, -EIO (after adapter shutdown) for unsupported
 * topologies.  NOTE(review): extraction dropped assignment lines at
 * doxygen 489, 492-497, 504, 507 and 510 (utilization/status-read-buffer
 * and topology bookkeeping) — restore from upstream.
 */
468 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
469 {
470  struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
471  struct zfcp_adapter *adapter = req->adapter;
472  struct Scsi_Host *shost = adapter->scsi_host;
473  struct fc_els_flogi *nsp, *plogi;
474 
475  /* adjust pointers for missing command code */
476  nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
477  - sizeof(u32));
478  plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
479  - sizeof(u32));
480 
481  if (req->data)
482  memcpy(req->data, bottom, sizeof(*bottom));
483 
484  fc_host_port_name(shost) = nsp->fl_wwpn;
485  fc_host_node_name(shost) = nsp->fl_wwnn;
486  fc_host_port_id(shost) = ntoh24(bottom->s_id);
487  fc_host_speed(shost) =
488  zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
490 
491  adapter->hydra_version = bottom->adapter_type;
493  adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
495 
496  if (fc_host_permanent_port_name(shost) == -1)
498 
499  switch (bottom->fc_topology) {
500  case FSF_TOPO_P2P:
501  adapter->peer_d_id = ntoh24(bottom->peer_d_id);
502  adapter->peer_wwpn = plogi->fl_wwpn;
503  adapter->peer_wwnn = plogi->fl_wwnn;
505  break;
506  case FSF_TOPO_FABRIC:
508  break;
509  case FSF_TOPO_AL:
511  /* fall through */
512  default:
513  dev_err(&adapter->ccw_device->dev,
514  "Unknown or unsupported arbitrated loop "
515  "fibre channel topology detected\n");
516  zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
517  return -EIO;
518  }
519 
520  zfcp_scsi_set_prot(adapter);
521 
522  return 0;
523 }
524 
/*
 * Completion handler for exchange config data: caches adapter features,
 * evaluates the response on FSF_GOOD (validating the advertised QTCB
 * size), handles the "exchange incomplete" link-down case, and enforces
 * QTCB version compatibility — shutting the adapter down on any
 * mismatch.  NOTE(review): extraction dropped case labels (doxygen 558),
 * status-bit updates (555, 562-563), and the version-range check lines
 * (574, 576-579, 589) — restore from upstream.
 */
525 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
526 {
527  struct zfcp_adapter *adapter = req->adapter;
528  struct fsf_qtcb *qtcb = req->qtcb;
529  struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
530  struct Scsi_Host *shost = adapter->scsi_host;
531 
532  if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
533  return;
534 
535  adapter->fsf_lic_version = bottom->lic_version;
536  adapter->adapter_features = bottom->adapter_features;
537  adapter->connection_features = bottom->connection_features;
538  adapter->peer_wwpn = 0;
539  adapter->peer_wwnn = 0;
540  adapter->peer_d_id = 0;
541 
542  switch (qtcb->header.fsf_status) {
543  case FSF_GOOD:
544  if (zfcp_fsf_exchange_config_evaluate(req))
545  return;
546 
547  if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
548  dev_err(&adapter->ccw_device->dev,
549  "FCP adapter maximum QTCB size (%d bytes) "
550  "is too small\n",
551  bottom->max_qtcb_size);
552  zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
553  return;
554  }
556  &adapter->status);
557  break;
559  fc_host_node_name(shost) = 0;
560  fc_host_port_name(shost) = 0;
561  fc_host_port_id(shost) = 0;
564  adapter->hydra_version = 0;
565 
566  zfcp_fsf_link_down_info_eval(req,
567  &qtcb->header.fsf_status_qual.link_down_info);
568  break;
569  default:
570  zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
571  return;
572  }
573 
575  adapter->hardware_version = bottom->hardware_version;
580  }
581 
582  if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
583  dev_err(&adapter->ccw_device->dev,
584  "The FCP adapter only supports newer "
585  "control block versions\n");
586  zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
587  return;
588  }
590  dev_err(&adapter->ccw_device->dev,
591  "The FCP adapter only supports older "
592  "control block versions\n");
593  zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
594  }
595 }
596 
/*
 * Evaluate an exchange-port-data response: copy the bottom block to a
 * caller-supplied buffer (req->data) and publish permanent WWPN, max
 * frame size and supported speeds to the SCSI host.
 * NOTE(review): the conditional around the permanent-port-name
 * assignment (doxygen 606, 608, 610) and trailing lines 614-617 were
 * lost in extraction — restore from upstream.
 */
597 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
598 {
599  struct zfcp_adapter *adapter = req->adapter;
600  struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
601  struct Scsi_Host *shost = adapter->scsi_host;
602 
603  if (req->data)
604  memcpy(req->data, bottom, sizeof(*bottom));
605 
607  fc_host_permanent_port_name(shost) = bottom->wwpn;
609  } else
611  fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
612  fc_host_supported_speeds(shost) =
613  zfcp_fsf_convert_portspeed(bottom->supported_speed);
618 }
619 
/*
 * Completion handler for exchange port data: evaluate the response on
 * FSF_GOOD; on the exchange-incomplete status, still evaluate what was
 * returned and then process the link-down info.
 * NOTE(review): the second case label (doxygen 631, presumably
 * FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE) was lost in extraction.
 */
620 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
621 {
622  struct fsf_qtcb *qtcb = req->qtcb;
623 
624  if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
625  return;
626 
627  switch (qtcb->header.fsf_status) {
628  case FSF_GOOD:
629  zfcp_fsf_exchange_port_evaluate(req);
630  break;
632  zfcp_fsf_exchange_port_evaluate(req);
633  zfcp_fsf_link_down_info_eval(req,
634  &qtcb->header.fsf_status_qual.link_down_info);
635  break;
636  }
637 }
638 
639 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
640 {
641  struct zfcp_fsf_req *req;
642 
643  if (likely(pool))
644  req = mempool_alloc(pool, GFP_ATOMIC);
645  else
646  req = kmalloc(sizeof(*req), GFP_ATOMIC);
647 
648  if (unlikely(!req))
649  return NULL;
650 
651  memset(req, 0, sizeof(*req));
652  req->pool = pool;
653  return req;
654 }
655 
656 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
657 {
658  struct fsf_qtcb *qtcb;
659 
660  if (likely(pool))
661  qtcb = mempool_alloc(pool, GFP_ATOMIC);
662  else
663  qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
664 
665  if (unlikely(!qtcb))
666  return NULL;
667 
668  memset(qtcb, 0, sizeof(*qtcb));
669  return qtcb;
670 }
671 
/*
 * Build a new FSF request for @fsf_cmd: allocate the request (and, for
 * everything except unsolicited status, its QTCB), assign the next
 * request id (skipping 0, which is reserved), pre-fill the QTCB prefix
 * and header, and initialize the QDIO request with SBAL type @sbtype.
 * Returns the request or ERR_PTR(-ENOMEM).  The statement order matters:
 * req_id must be fixed before the QTCB prefix/header copies it.
 */
672 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
673  u32 fsf_cmd, u8 sbtype,
674  mempool_t *pool)
675 {
676  struct zfcp_adapter *adapter = qdio->adapter;
677  struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
678 
679  if (unlikely(!req))
680  return ERR_PTR(-ENOMEM);
681 
682  if (adapter->req_no == 0)
683  adapter->req_no++;
684 
685  INIT_LIST_HEAD(&req->list);
686  init_timer(&req->timer);
687  init_completion(&req->completion);
688 
689  req->adapter = adapter;
690  req->fsf_command = fsf_cmd;
691  req->req_id = adapter->req_no;
692 
693  if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
694  if (likely(pool))
695  req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
696  else
697  req->qtcb = zfcp_qtcb_alloc(NULL);
698 
699  if (unlikely(!req->qtcb)) {
700  zfcp_fsf_req_free(req);
701  return ERR_PTR(-ENOMEM);
702  }
703 
704  req->seq_no = adapter->fsf_req_seq_no;
705  req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
706  req->qtcb->prefix.req_id = req->req_id;
707  req->qtcb->prefix.ulp_info = 26;
708  req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
709  req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
710  req->qtcb->header.req_handle = req->req_id;
711  req->qtcb->header.fsf_command = req->fsf_command;
712  }
713 
714  zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
715  req->qtcb, sizeof(struct fsf_qtcb));
716 
717  return req;
718 }
719 
720 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
721 {
722  struct zfcp_adapter *adapter = req->adapter;
723  struct zfcp_qdio *qdio = adapter->qdio;
724  int with_qtcb = (req->qtcb != NULL);
725  int req_id = req->req_id;
726 
727  zfcp_reqlist_add(adapter->req_list, req);
728 
729  req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
730  req->issued = get_clock();
731  if (zfcp_qdio_send(qdio, &req->qdio_req)) {
732  del_timer(&req->timer);
733  /* lookup request again, list might have changed */
734  zfcp_reqlist_find_rm(adapter->req_list, req_id);
735  zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
736  return -EIO;
737  }
738 
739  /* Don't increase for unsolicited status */
740  if (with_qtcb)
741  adapter->fsf_req_seq_no++;
742  adapter->req_no++;
743 
744  return 0;
745 }
746 
754 {
755  struct zfcp_adapter *adapter = qdio->adapter;
756  struct zfcp_fsf_req *req;
757  struct fsf_status_read_buffer *sr_buf;
758  struct page *page;
759  int retval = -EIO;
760 
761  spin_lock_irq(&qdio->req_q_lock);
762  if (zfcp_qdio_sbal_get(qdio))
763  goto out;
764 
765  req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
766  adapter->pool.status_read_req);
767  if (IS_ERR(req)) {
768  retval = PTR_ERR(req);
769  goto out;
770  }
771 
772  page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
773  if (!page) {
774  retval = -ENOMEM;
775  goto failed_buf;
776  }
777  sr_buf = page_address(page);
778  memset(sr_buf, 0, sizeof(*sr_buf));
779  req->data = sr_buf;
780 
781  zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
782  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
783 
784  retval = zfcp_fsf_req_send(req);
785  if (retval)
786  goto failed_req_send;
787 
788  goto out;
789 
790 failed_req_send:
791  req->data = NULL;
792  mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
793 failed_buf:
794  zfcp_dbf_hba_fsf_uss("fssr__1", req);
795  zfcp_fsf_req_free(req);
796 out:
797  spin_unlock_irq(&qdio->req_q_lock);
798  return retval;
799 }
800 
/*
 * Completion handler for an abort-FCP-command request: map the channel's
 * fsf_status to recovery actions (adapter/port/LUN reopen on invalid or
 * boxed handles, link test on qualifier hints) and — in branches whose
 * assignment lines survive — set the abort result flags on the request.
 * NOTE(review): extraction dropped many "case FSF_*:" labels and the
 * ZFCP_STATUS_FSFREQ_* assignment lines (doxygen 813, 817, 820, 823,
 * 826-827, 831, 834, 837-840, 842, 844, 847-848, 853) — restore from
 * upstream.
 */
801 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
802 {
803  struct scsi_device *sdev = req->data;
804  struct zfcp_scsi_dev *zfcp_sdev;
805  union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
806 
807  if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
808  return;
809 
810  zfcp_sdev = sdev_to_zfcp(sdev);
811 
812  switch (req->qtcb->header.fsf_status) {
814  if (fsq->word[0] == fsq->word[1]) {
815  zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
816  "fsafch1");
818  }
819  break;
821  if (fsq->word[0] == fsq->word[1]) {
822  zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
824  }
825  break;
828  break;
829  case FSF_PORT_BOXED:
830  zfcp_erp_set_port_status(zfcp_sdev->port,
832  zfcp_erp_port_reopen(zfcp_sdev->port,
833  ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
835  break;
836  case FSF_LUN_BOXED:
839  "fsafch4");
841  break;
843  switch (fsq->word[0]) {
845  zfcp_fc_test_link(zfcp_sdev->port);
846  /* fall through */
849  break;
850  }
851  break;
852  case FSF_GOOD:
854  break;
855  }
856 }
857 
865 {
866  struct zfcp_fsf_req *req = NULL;
867  struct scsi_device *sdev = scmnd->device;
868  struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
869  struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
870  unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
871 
872  spin_lock_irq(&qdio->req_q_lock);
873  if (zfcp_qdio_sbal_get(qdio))
874  goto out;
875  req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
877  qdio->adapter->pool.scsi_abort);
878  if (IS_ERR(req)) {
879  req = NULL;
880  goto out;
881  }
882 
883  if (unlikely(!(atomic_read(&zfcp_sdev->status) &
885  goto out_error_free;
886 
887  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
888 
889  req->data = sdev;
890  req->handler = zfcp_fsf_abort_fcp_command_handler;
891  req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
892  req->qtcb->header.port_handle = zfcp_sdev->port->handle;
893  req->qtcb->bottom.support.req_handle = (u64) old_req_id;
894 
895  zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
896  if (!zfcp_fsf_req_send(req))
897  goto out;
898 
899 out_error_free:
900  zfcp_fsf_req_free(req);
901  req = NULL;
902 out:
903  spin_unlock_irq(&qdio->req_q_lock);
904  return req;
905 }
906 
/*
 * Completion handler for a CT/GS request: ct->status starts as -EINVAL
 * and is set to 0 only on FSF_GOOD; other statuses trigger class or
 * recovery handling.  The caller-supplied handler (if any) always runs
 * afterwards, even for dismissed requests.
 * NOTE(review): extraction dropped several "case FSF_*:" labels (doxygen
 * 923, 926, 937, 939, 942-945, 947) and the inner-qualifier case labels
 * (928-930) — restore from upstream.
 */
907 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
908 {
909  struct zfcp_adapter *adapter = req->adapter;
910  struct zfcp_fsf_ct_els *ct = req->data;
911  struct fsf_qtcb_header *header = &req->qtcb->header;
912 
913  ct->status = -EINVAL;
914 
915  if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
916  goto skip_fsfstatus;
917 
918  switch (header->fsf_status) {
919  case FSF_GOOD:
920  zfcp_dbf_san_res("fsscth2", req);
921  ct->status = 0;
922  break;
924  zfcp_fsf_class_not_supp(req);
925  break;
927  switch (header->fsf_status_qual.word[0]){
931  break;
932  }
933  break;
934  case FSF_ACCESS_DENIED:
935  break;
936  case FSF_PORT_BOXED:
938  break;
940  zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
941  /* fall through */
946  case FSF_SBAL_MISMATCH:
948  break;
949  }
950 
951 skip_fsfstatus:
952  if (ct->handler)
953  ct->handler(ct->handler_data);
954 }
955 
956 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
957  struct zfcp_qdio_req *q_req,
958  struct scatterlist *sg_req,
959  struct scatterlist *sg_resp)
960 {
961  zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
962  zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
963  zfcp_qdio_set_sbale_last(qdio, q_req);
964 }
965 
/*
 * Map CT/ELS request and response scatterlists onto SBALs.  Three modes,
 * in order of preference: multi-buffer (data-division) when the adapter
 * supports it; a single unchained SBAL when both buffers fit; otherwise
 * chained SBALs, which require the ELS_CT_CHAINED_SBALS feature.  The
 * call order in the chained path (set_sbale_last, skip_to_last_sbale
 * between request and response) is hardware-mandated — do not reorder.
 * Returns 0 on success, -EIO on SBAL mapping failure, -EOPNOTSUPP when
 * chaining is needed but unsupported.
 */
966 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
967  struct scatterlist *sg_req,
968  struct scatterlist *sg_resp)
969 {
970  struct zfcp_adapter *adapter = req->adapter;
971  struct zfcp_qdio *qdio = adapter->qdio;
972  struct fsf_qtcb *qtcb = req->qtcb;
973  u32 feat = adapter->adapter_features;
974 
975  if (zfcp_adapter_multi_buffer_active(adapter)) {
976  if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
977  return -EIO;
978  if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
979  return -EIO;
980 
981  zfcp_qdio_set_data_div(qdio, &req->qdio_req,
982  zfcp_qdio_sbale_count(sg_req));
983  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
984  zfcp_qdio_set_scount(qdio, &req->qdio_req);
985  return 0;
986  }
987 
988  /* use single, unchained SBAL if it can hold the request */
989  if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
990  zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
991  sg_req, sg_resp);
992  return 0;
993  }
994 
995  if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
996  return -EOPNOTSUPP;
997 
998  if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
999  return -EIO;
1000 
1001  qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
1002 
1003  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1004  zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
1005 
1006  if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1007  return -EIO;
1008 
1009  qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1010 
1011  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1012 
1013  return 0;
1014 }
1015 
1016 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1017  struct scatterlist *sg_req,
1018  struct scatterlist *sg_resp,
1019  unsigned int timeout)
1020 {
1021  int ret;
1022 
1023  ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1024  if (ret)
1025  return ret;
1026 
1027  /* common settings for ct/gs and els requests */
1028  if (timeout > 255)
1029  timeout = 255; /* max value accepted by hardware */
1030  req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1031  req->qtcb->bottom.support.timeout = timeout;
1032  zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1033 
1034  return 0;
1035 }
1036 
1042 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1043  struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1044  unsigned int timeout)
1045 {
1046  struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1047  struct zfcp_fsf_req *req;
1048  int ret = -EIO;
1049 
1050  spin_lock_irq(&qdio->req_q_lock);
1051  if (zfcp_qdio_sbal_get(qdio))
1052  goto out;
1053 
1054  req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1056 
1057  if (IS_ERR(req)) {
1058  ret = PTR_ERR(req);
1059  goto out;
1060  }
1061 
1063  ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1064  if (ret)
1065  goto failed_send;
1066 
1067  req->handler = zfcp_fsf_send_ct_handler;
1068  req->qtcb->header.port_handle = wka_port->handle;
1069  req->data = ct;
1070 
1071  zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1072 
1073  ret = zfcp_fsf_req_send(req);
1074  if (ret)
1075  goto failed_send;
1076 
1077  goto out;
1078 
1079 failed_send:
1080  zfcp_fsf_req_free(req);
1081 out:
1082  spin_unlock_irq(&qdio->req_q_lock);
1083  return ret;
1084 }
1085 
/*
 * Completion handler for an ELS request: send_els->status starts as
 * -EINVAL and becomes 0 only on FSF_GOOD; access-denied is reported to
 * the CFDC layer when a port is attached.  The caller-supplied handler
 * (if any) always runs afterwards, even for dismissed requests.
 * NOTE(review): extraction dropped several "case FSF_*:" labels (doxygen
 * 1102, 1105, 1114-1117) and the inner-qualifier case labels/actions
 * (1107-1110, 1122, 1129) — restore from upstream.
 */
1086 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1087 {
1088  struct zfcp_fsf_ct_els *send_els = req->data;
1089  struct zfcp_port *port = send_els->port;
1090  struct fsf_qtcb_header *header = &req->qtcb->header;
1091 
1092  send_els->status = -EINVAL;
1093 
1094  if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1095  goto skip_fsfstatus;
1096 
1097  switch (header->fsf_status) {
1098  case FSF_GOOD:
1099  zfcp_dbf_san_res("fsselh1", req);
1100  send_els->status = 0;
1101  break;
1103  zfcp_fsf_class_not_supp(req);
1104  break;
1106  switch (header->fsf_status_qual.word[0]){
1111  break;
1112  }
1113  break;
1118  break;
1119  case FSF_ACCESS_DENIED:
1120  if (port) {
1121  zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1123  }
1124  break;
1125  case FSF_SBAL_MISMATCH:
1126  /* should never occur, avoided in zfcp_fsf_send_els */
1127  /* fall through */
1128  default:
1130  break;
1131  }
1132 skip_fsfstatus:
1133  if (send_els->handler)
1134  send_els->handler(send_els->handler_data);
1135 }
1136 
1142  struct zfcp_fsf_ct_els *els, unsigned int timeout)
1143 {
1144  struct zfcp_fsf_req *req;
1145  struct zfcp_qdio *qdio = adapter->qdio;
1146  int ret = -EIO;
1147 
1148  spin_lock_irq(&qdio->req_q_lock);
1149  if (zfcp_qdio_sbal_get(qdio))
1150  goto out;
1151 
1152  req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1154 
1155  if (IS_ERR(req)) {
1156  ret = PTR_ERR(req);
1157  goto out;
1158  }
1159 
1161 
1162  if (!zfcp_adapter_multi_buffer_active(adapter))
1163  zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1164 
1165  ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1166 
1167  if (ret)
1168  goto failed_send;
1169 
1170  hton24(req->qtcb->bottom.support.d_id, d_id);
1171  req->handler = zfcp_fsf_send_els_handler;
1172  req->data = els;
1173 
1174  zfcp_dbf_san_req("fssels1", req, d_id);
1175 
1176  ret = zfcp_fsf_req_send(req);
1177  if (ret)
1178  goto failed_send;
1179 
1180  goto out;
1181 
1182 failed_send:
1183  zfcp_fsf_req_free(req);
1184 out:
1185  spin_unlock_irq(&qdio->req_q_lock);
1186  return ret;
1187 }
1188 
1190 {
1191  struct zfcp_fsf_req *req;
1192  struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1193  int retval = -EIO;
1194 
1195  spin_lock_irq(&qdio->req_q_lock);
1196  if (zfcp_qdio_sbal_get(qdio))
1197  goto out;
1198 
1199  req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1201  qdio->adapter->pool.erp_req);
1202 
1203  if (IS_ERR(req)) {
1204  retval = PTR_ERR(req);
1205  goto out;
1206  }
1207 
1209  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1210 
1211  req->qtcb->bottom.config.feature_selection =
1216  req->erp_action = erp_action;
1217  req->handler = zfcp_fsf_exchange_config_data_handler;
1218  erp_action->fsf_req_id = req->req_id;
1219 
1220  zfcp_fsf_start_erp_timer(req);
1221  retval = zfcp_fsf_req_send(req);
1222  if (retval) {
1223  zfcp_fsf_req_free(req);
1224  erp_action->fsf_req_id = 0;
1225  }
1226 out:
1227  spin_unlock_irq(&qdio->req_q_lock);
1228  return retval;
1229 }
1230 
1232  struct fsf_qtcb_bottom_config *data)
1233 {
1234  struct zfcp_fsf_req *req = NULL;
1235  int retval = -EIO;
1236 
1237  spin_lock_irq(&qdio->req_q_lock);
1238  if (zfcp_qdio_sbal_get(qdio))
1239  goto out_unlock;
1240 
1241  req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1243 
1244  if (IS_ERR(req)) {
1245  retval = PTR_ERR(req);
1246  goto out_unlock;
1247  }
1248 
1249  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1250  req->handler = zfcp_fsf_exchange_config_data_handler;
1251 
1252  req->qtcb->bottom.config.feature_selection =
1257 
1258  if (data)
1259  req->data = data;
1260 
1261  zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1262  retval = zfcp_fsf_req_send(req);
1263  spin_unlock_irq(&qdio->req_q_lock);
1264  if (!retval)
1266 
1267  zfcp_fsf_req_free(req);
1268  return retval;
1269 
1270 out_unlock:
1271  spin_unlock_irq(&qdio->req_q_lock);
1272  return retval;
1273 }
1274 
1281 {
1282  struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1283  struct zfcp_fsf_req *req;
1284  int retval = -EIO;
1285 
1286  if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1287  return -EOPNOTSUPP;
1288 
1289  spin_lock_irq(&qdio->req_q_lock);
1290  if (zfcp_qdio_sbal_get(qdio))
1291  goto out;
1292 
1293  req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1295  qdio->adapter->pool.erp_req);
1296 
1297  if (IS_ERR(req)) {
1298  retval = PTR_ERR(req);
1299  goto out;
1300  }
1301 
1303  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1304 
1305  req->handler = zfcp_fsf_exchange_port_data_handler;
1306  req->erp_action = erp_action;
1307  erp_action->fsf_req_id = req->req_id;
1308 
1309  zfcp_fsf_start_erp_timer(req);
1310  retval = zfcp_fsf_req_send(req);
1311  if (retval) {
1312  zfcp_fsf_req_free(req);
1313  erp_action->fsf_req_id = 0;
1314  }
1315 out:
1316  spin_unlock_irq(&qdio->req_q_lock);
1317  return retval;
1318 }
1319 
/*
 * zfcp_fsf_exchange_port_data_sync - exchange-port-data without an ERP
 * action; if @data is non-NULL the response is stored there via req->data.
 * Presumably waits for completion before freeing the request (the dropped
 * line 1358 between "if (!retval)" and zfcp_fsf_req_free) — TODO confirm
 * against upstream.
 * NOTE(review): Doxygen listing — the first signature line (1326) and some
 * arguments were dropped by the renderer.
 */
1327  struct fsf_qtcb_bottom_port *data)
1328 {
1329  struct zfcp_fsf_req *req = NULL;
1330  int retval = -EIO;
1331 
1332  if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1333  return -EOPNOTSUPP;
1334 
1335  spin_lock_irq(&qdio->req_q_lock);
1336  if (zfcp_qdio_sbal_get(qdio))
1337  goto out_unlock;
1338 
1339  req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1341 
1342  if (IS_ERR(req)) {
1343  retval = PTR_ERR(req);
1344  goto out_unlock;
1345  }
1346 
1347  if (data)
1348  req->data = data;
1349 
1350  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1351 
1352  req->handler = zfcp_fsf_exchange_port_data_handler;
1353  zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1354  retval = zfcp_fsf_req_send(req);
1355  spin_unlock_irq(&qdio->req_q_lock);
1356 
1357  if (!retval)
1359 
1360  zfcp_fsf_req_free(req);
1361 
1362  return retval;
1363 
1364 out_unlock:
1365  spin_unlock_irq(&qdio->req_q_lock);
1366  return retval;
1367 }
1368 
/*
 * zfcp_fsf_open_port_handler - evaluate the response to an open-port
 * request.  On FSF_GOOD the port handle is stored and the returned PLOGI
 * payload evaluated; in all cases the port device reference taken by
 * zfcp_fsf_open_port() is dropped at the out label.
 * NOTE(review): Doxygen listing — several case labels and status updates
 * in the fsf_status switch were dropped by the renderer (gaps in margin
 * numbers); restore from upstream before compiling.
 */
1369 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1370 {
1371  struct zfcp_port *port = req->data;
1372  struct fsf_qtcb_header *header = &req->qtcb->header;
1373  struct fc_els_flogi *plogi;
1374 
1375  if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1376  goto out;
1377 
1378  switch (header->fsf_status) {
1379  case FSF_PORT_ALREADY_OPEN:
1380  break;
1381  case FSF_ACCESS_DENIED:
1382  zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1384  break;
1386  dev_warn(&req->adapter->ccw_device->dev,
1387  "Not enough FCP adapter resources to open "
1388  "remote port 0x%016Lx\n",
1389  (unsigned long long)port->wwpn);
1393  break;
1395  switch (header->fsf_status_qual.word[0]) {
1400  break;
1401  }
1402  break;
1403  case FSF_GOOD:
1404  port->handle = header->port_handle;
1409  &port->status);
1410  /* check whether D_ID has changed during open */
1411  /*
1412  * FIXME: This check is not airtight, as the FCP channel does
1413  * not monitor closures of target port connections caused on
1414  * the remote side. Thus, they might miss out on invalidating
1415  * locally cached WWPNs (and other N_Port parameters) of gone
1416  * target ports. So, our heroic attempt to make things safe
1417  * could be undermined by 'open port' response data tagged with
1418  * obsolete WWPNs. Another reason to monitor potential
1419  * connection closures ourself at least (by interpreting
1420  * incoming ELS' and unsolicited status). It just crosses my
1421  * mind that one should be able to cross-check by means of
1422  * another GID_PN straight after a port has been opened.
1423  * Alternately, an ADISC/PDISC ELS should suffice, as well.
1424  */
1425  plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1426  if (req->qtcb->bottom.support.els1_length >=
1428  zfcp_fc_plogi_evaluate(port, plogi);
1429  break;
1432  break;
1433  }
1434 
1435 out:
1436  put_device(&port->dev);
1437 }
1438 
/*
 * zfcp_fsf_open_port - create and send an open-port request for an ERP
 * action.  Takes a reference on the port device which the response handler
 * (or the send-failure path below) drops again.
 * NOTE(review): Doxygen listing — the SBAL flags argument (1456) and one
 * more line (1464) were dropped by the renderer.
 */
1444 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1445 {
1446  struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1447  struct zfcp_port *port = erp_action->port;
1448  struct zfcp_fsf_req *req;
1449  int retval = -EIO;
1450 
1451  spin_lock_irq(&qdio->req_q_lock);
1452  if (zfcp_qdio_sbal_get(qdio))
1453  goto out;
1454 
1455  req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1457  qdio->adapter->pool.erp_req);
1458 
1459  if (IS_ERR(req)) {
1460  retval = PTR_ERR(req);
1461  goto out;
1462  }
1463 
1465  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1466 
1467  req->handler = zfcp_fsf_open_port_handler;
1468  hton24(req->qtcb->bottom.support.d_id, port->d_id);
1469  req->data = port;
1470  req->erp_action = erp_action;
1471  erp_action->fsf_req_id = req->req_id;
1472  get_device(&port->dev);
1473 
1474  zfcp_fsf_start_erp_timer(req);
1475  retval = zfcp_fsf_req_send(req);
1476  if (retval) {
1477  zfcp_fsf_req_free(req);
1478  erp_action->fsf_req_id = 0;
1479  put_device(&port->dev);
1480  }
1481 out:
1482  spin_unlock_irq(&qdio->req_q_lock);
1483  return retval;
1484 }
1485 
/*
 * zfcp_fsf_close_port_handler - evaluate the response to a close-port
 * request; reopens the adapter on the (dropped) error case label at 1494
 * and clears port state on success (status update at 1501 also dropped).
 * NOTE(review): Doxygen listing with missing case labels — restore from
 * upstream zfcp_fsf.c.
 */
1486 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1487 {
1488  struct zfcp_port *port = req->data;
1489 
1490  if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1491  return;
1492 
1493  switch (req->qtcb->header.fsf_status) {
1495  zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1497  break;
1499  break;
1500  case FSF_GOOD:
1502  break;
1503  }
1504 }
1505 
/*
 * zfcp_fsf_close_port - create and send a close-port request for an ERP
 * action, using the port handle stored on the port at open time.
 * NOTE(review): Doxygen listing — the function header (through 1511) and
 * the SBAL flags argument (1522) were dropped by the renderer.
 */
1512 {
1513  struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1514  struct zfcp_fsf_req *req;
1515  int retval = -EIO;
1516 
1517  spin_lock_irq(&qdio->req_q_lock);
1518  if (zfcp_qdio_sbal_get(qdio))
1519  goto out;
1520 
1521  req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1523  qdio->adapter->pool.erp_req);
1524 
1525  if (IS_ERR(req)) {
1526  retval = PTR_ERR(req);
1527  goto out;
1528  }
1529 
1531  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1532 
1533  req->handler = zfcp_fsf_close_port_handler;
1534  req->data = erp_action->port;
1535  req->erp_action = erp_action;
1536  req->qtcb->header.port_handle = erp_action->port->handle;
1537  erp_action->fsf_req_id = req->req_id;
1538 
1539  zfcp_fsf_start_erp_timer(req);
1540  retval = zfcp_fsf_req_send(req);
1541  if (retval) {
1542  zfcp_fsf_req_free(req);
1543  erp_action->fsf_req_id = 0;
1544  }
1545 out:
1546  spin_unlock_irq(&qdio->req_q_lock);
1547  return retval;
1548 }
1549 
/*
 * zfcp_fsf_open_wka_port_handler - evaluate the response to opening a
 * well-known-address port: record the handle and mark the WKA port ONLINE
 * on success, OFFLINE on any failure, then wake waiters on completion_wq.
 * NOTE(review): Doxygen listing — two case labels (margin 1561, 1565-1566)
 * were dropped by the renderer.
 */
1550 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1551 {
1552  struct zfcp_fc_wka_port *wka_port = req->data;
1553  struct fsf_qtcb_header *header = &req->qtcb->header;
1554 
1555  if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1556  wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1557  goto out;
1558  }
1559 
1560  switch (header->fsf_status) {
1562  dev_warn(&req->adapter->ccw_device->dev,
1563  "Opening WKA port 0x%x failed\n", wka_port->d_id);
1564  /* fall through */
1567  /* fall through */
1568  case FSF_ACCESS_DENIED:
1569  wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1570  break;
1571  case FSF_GOOD:
1572  wka_port->handle = header->port_handle;
1573  /* fall through */
1574  case FSF_PORT_ALREADY_OPEN:
1575  wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1576  }
1577 out:
1578  wake_up(&wka_port->completion_wq);
1579 }
1580 
/*
 * zfcp_fsf_open_wka_port - create and send an open request for a
 * well-known-address (WKA) port; completion is signalled through the
 * handler above via wka_port->completion_wq.
 * NOTE(review): Doxygen listing — the SBAL flags argument (1597) and one
 * more line (1605) were dropped by the renderer.
 */
1586 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1587 {
1588  struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1589  struct zfcp_fsf_req *req;
1590  int retval = -EIO;
1591 
1592  spin_lock_irq(&qdio->req_q_lock);
1593  if (zfcp_qdio_sbal_get(qdio))
1594  goto out;
1595 
1596  req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1598  qdio->adapter->pool.erp_req);
1599 
1600  if (IS_ERR(req)) {
1601  retval = PTR_ERR(req);
1602  goto out;
1603  }
1604 
1606  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1607 
1608  req->handler = zfcp_fsf_open_wka_port_handler;
1609  hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1610  req->data = wka_port;
1611 
1612  zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1613  retval = zfcp_fsf_req_send(req);
1614  if (retval)
1615  zfcp_fsf_req_free(req);
1616 out:
1617  spin_unlock_irq(&qdio->req_q_lock);
1618  return retval;
1619 }
1620 
/*
 * zfcp_fsf_close_wka_port_handler - evaluate the response to closing a WKA
 * port: reopen the adapter if the handle was stale, unconditionally mark
 * the WKA port OFFLINE and wake waiters.
 * NOTE(review): Doxygen listing — margin line 1626 was dropped.
 */
1621 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1622 {
1623  struct zfcp_fc_wka_port *wka_port = req->data;
1624 
1625  if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1627  zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1628  }
1629 
1630  wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1631  wake_up(&wka_port->completion_wq);
1632 }
1633 
/*
 * zfcp_fsf_close_wka_port - create and send a close request for a WKA port
 * using the handle recorded at open time.
 * NOTE(review): Doxygen listing — the SBAL flags argument (1650) and one
 * more line (1658) were dropped by the renderer.
 */
1639 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1640 {
1641  struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1642  struct zfcp_fsf_req *req;
1643  int retval = -EIO;
1644 
1645  spin_lock_irq(&qdio->req_q_lock);
1646  if (zfcp_qdio_sbal_get(qdio))
1647  goto out;
1648 
1649  req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1651  qdio->adapter->pool.erp_req);
1652 
1653  if (IS_ERR(req)) {
1654  retval = PTR_ERR(req);
1655  goto out;
1656  }
1657 
1659  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1660 
1661  req->handler = zfcp_fsf_close_wka_port_handler;
1662  req->data = wka_port;
1663  req->qtcb->header.port_handle = wka_port->handle;
1664 
1665  zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1666  retval = zfcp_fsf_req_send(req);
1667  if (retval)
1668  zfcp_fsf_req_free(req);
1669 out:
1670  spin_unlock_irq(&qdio->req_q_lock);
1671  return retval;
1672 }
1673 
/*
 * zfcp_fsf_close_physical_port_handler - evaluate the response to a
 * close-physical-port request; on PORT_BOXED or GOOD it walks all SCSI
 * devices of the port to update per-LUN status (the exact status bits are
 * on margin lines the renderer dropped).
 * NOTE(review): Doxygen listing with several dropped lines (case labels,
 * status-bit arguments) — restore from upstream before compiling.
 */
1674 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1675 {
1676  struct zfcp_port *port = req->data;
1677  struct fsf_qtcb_header *header = &req->qtcb->header;
1678  struct scsi_device *sdev;
1679 
1680  if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1681  return;
1682 
1683  switch (header->fsf_status) {
1685  zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1687  break;
1688  case FSF_ACCESS_DENIED:
1689  zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1690  break;
1691  case FSF_PORT_BOXED:
1692  /* can't use generic zfcp_erp_modify_port_status because
1693  * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1695  shost_for_each_device(sdev, port->adapter->scsi_host)
1696  if (sdev_to_zfcp(sdev)->port == port)
1698  &sdev_to_zfcp(sdev)->status);
1701  "fscpph2");
1702  req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1703  break;
1705  switch (header->fsf_status_qual.word[0]) {
1707  /* fall through */
1710  break;
1711  }
1712  break;
1713  case FSF_GOOD:
1714  /* can't use generic zfcp_erp_modify_port_status because
1715  * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1716  */
1718  shost_for_each_device(sdev, port->adapter->scsi_host)
1719  if (sdev_to_zfcp(sdev)->port == port)
1721  &sdev_to_zfcp(sdev)->status);
1722  break;
1723  }
1724 }
1725 
/*
 * zfcp_fsf_close_physical_port - create and send a close-physical-port
 * request for an ERP action.
 * NOTE(review): Doxygen listing — the function header (through 1731), the
 * SBAL flags argument (1742) and one more line (1750) were dropped.
 */
1732 {
1733  struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1734  struct zfcp_fsf_req *req;
1735  int retval = -EIO;
1736 
1737  spin_lock_irq(&qdio->req_q_lock);
1738  if (zfcp_qdio_sbal_get(qdio))
1739  goto out;
1740 
1741  req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1743  qdio->adapter->pool.erp_req);
1744 
1745  if (IS_ERR(req)) {
1746  retval = PTR_ERR(req);
1747  goto out;
1748  }
1749 
1751  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1752 
1753  req->data = erp_action->port;
1754  req->qtcb->header.port_handle = erp_action->port->handle;
1755  req->erp_action = erp_action;
1756  req->handler = zfcp_fsf_close_physical_port_handler;
1757  erp_action->fsf_req_id = req->req_id;
1758 
1759  zfcp_fsf_start_erp_timer(req);
1760  retval = zfcp_fsf_req_send(req);
1761  if (retval) {
1762  zfcp_fsf_req_free(req);
1763  erp_action->fsf_req_id = 0;
1764  }
1765 out:
1766  spin_unlock_irq(&qdio->req_q_lock);
1767  return retval;
1768 }
1769 
/*
 * zfcp_fsf_open_lun_handler - evaluate the response to an open-LUN
 * request: on FSF_GOOD store the LUN handle and evaluate the CFDC bottom;
 * on the various failure statuses trigger the matching recovery action.
 * NOTE(review): Doxygen listing — several case labels and status-flag
 * updates were dropped by the renderer (gaps in margin numbers).
 */
1770 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1771 {
1772  struct zfcp_adapter *adapter = req->adapter;
1773  struct scsi_device *sdev = req->data;
1774  struct zfcp_scsi_dev *zfcp_sdev;
1775  struct fsf_qtcb_header *header = &req->qtcb->header;
1776  struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1777 
1778  if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1779  return;
1780 
1781  zfcp_sdev = sdev_to_zfcp(sdev);
1782 
1787  &zfcp_sdev->status);
1788 
1789  switch (header->fsf_status) {
1790 
1792  zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
1793  /* fall through */
1794  case FSF_LUN_ALREADY_OPEN:
1795  break;
1796  case FSF_ACCESS_DENIED:
1797  zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
1799  break;
1800  case FSF_PORT_BOXED:
1801  zfcp_erp_set_port_status(zfcp_sdev->port,
1803  zfcp_erp_port_reopen(zfcp_sdev->port,
1804  ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1806  break;
1808  zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual);
1810  break;
1812  dev_warn(&adapter->ccw_device->dev,
1813  "No handle is available for LUN "
1814  "0x%016Lx on port 0x%016Lx\n",
1815  (unsigned long long)zfcp_scsi_dev_lun(sdev),
1816  (unsigned long long)zfcp_sdev->port->wwpn);
1818  /* fall through */
1821  break;
1823  switch (header->fsf_status_qual.word[0]) {
1825  zfcp_fc_test_link(zfcp_sdev->port);
1826  /* fall through */
1829  break;
1830  }
1831  break;
1832 
1833  case FSF_GOOD:
1834  zfcp_sdev->lun_handle = header->lun_handle;
1836  zfcp_cfdc_open_lun_eval(sdev, bottom);
1837  break;
1838  }
1839 }
1840 
/*
 * zfcp_fsf_open_lun - create and send an open-LUN request for an ERP
 * action.  Without NPIV, boxing is suppressed via
 * FSF_OPEN_LUN_SUPPRESS_BOXING.
 * NOTE(review): Doxygen listing — the SBAL flags argument (1858) and one
 * more line (1866) were dropped by the renderer.
 */
1846 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1847 {
1848  struct zfcp_adapter *adapter = erp_action->adapter;
1849  struct zfcp_qdio *qdio = adapter->qdio;
1850  struct zfcp_fsf_req *req;
1851  int retval = -EIO;
1852 
1853  spin_lock_irq(&qdio->req_q_lock);
1854  if (zfcp_qdio_sbal_get(qdio))
1855  goto out;
1856 
1857  req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1859  adapter->pool.erp_req);
1860 
1861  if (IS_ERR(req)) {
1862  retval = PTR_ERR(req);
1863  goto out;
1864  }
1865 
1867  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1868 
1869  req->qtcb->header.port_handle = erp_action->port->handle;
1870  req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1871  req->handler = zfcp_fsf_open_lun_handler;
1872  req->data = erp_action->sdev;
1873  req->erp_action = erp_action;
1874  erp_action->fsf_req_id = req->req_id;
1875 
1876  if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1877  req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1878 
1879  zfcp_fsf_start_erp_timer(req);
1880  retval = zfcp_fsf_req_send(req);
1881  if (retval) {
1882  zfcp_fsf_req_free(req);
1883  erp_action->fsf_req_id = 0;
1884  }
1885 out:
1886  spin_unlock_irq(&qdio->req_q_lock);
1887  return retval;
1888 }
1889 
/*
 * zfcp_fsf_close_lun_handler - evaluate the response to a close-LUN
 * request, reopening the adapter/port or testing the link as the
 * fsf_status demands.
 * NOTE(review): Doxygen listing — several case labels and status-flag
 * updates were dropped by the renderer (gaps in margin numbers).
 */
1890 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1891 {
1892  struct scsi_device *sdev = req->data;
1893  struct zfcp_scsi_dev *zfcp_sdev;
1894 
1895  if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1896  return;
1897 
1898  zfcp_sdev = sdev_to_zfcp(sdev);
1899 
1900  switch (req->qtcb->header.fsf_status) {
1902  zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1904  break;
1906  zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1908  break;
1909  case FSF_PORT_BOXED:
1910  zfcp_erp_set_port_status(zfcp_sdev->port,
1912  zfcp_erp_port_reopen(zfcp_sdev->port,
1913  ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1915  break;
1917  switch (req->qtcb->header.fsf_status_qual.word[0]) {
1919  zfcp_fc_test_link(zfcp_sdev->port);
1920  /* fall through */
1923  break;
1924  }
1925  break;
1926  case FSF_GOOD:
1928  break;
1929  }
1930 }
1931 
/*
 * zfcp_fsf_close_lun - create and send a close-LUN request for an ERP
 * action, using the port and LUN handles recorded at open time.
 * NOTE(review): Doxygen listing — the function header (through 1937), the
 * SBAL flags argument (1949) and one more line (1957) were dropped.
 */
1938 {
1939  struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1940  struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1941  struct zfcp_fsf_req *req;
1942  int retval = -EIO;
1943 
1944  spin_lock_irq(&qdio->req_q_lock);
1945  if (zfcp_qdio_sbal_get(qdio))
1946  goto out;
1947 
1948  req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1950  qdio->adapter->pool.erp_req);
1951 
1952  if (IS_ERR(req)) {
1953  retval = PTR_ERR(req);
1954  goto out;
1955  }
1956 
1958  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1959 
1960  req->qtcb->header.port_handle = erp_action->port->handle;
1961  req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1962  req->handler = zfcp_fsf_close_lun_handler;
1963  req->data = erp_action->sdev;
1964  req->erp_action = erp_action;
1965  erp_action->fsf_req_id = req->req_id;
1966 
1967  zfcp_fsf_start_erp_timer(req);
1968  retval = zfcp_fsf_req_send(req);
1969  if (retval) {
1970  zfcp_fsf_req_free(req);
1971  erp_action->fsf_req_id = 0;
1972  }
1973 out:
1974  spin_unlock_irq(&qdio->req_q_lock);
1975  return retval;
1976 }
1977 
1978 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
1979 {
1980  lat_rec->sum += lat;
1981  lat_rec->min = min(lat_rec->min, lat);
1982  lat_rec->max = max(lat_rec->max, lat);
1983 }
1984 
/*
 * zfcp_fsf_req_trace - emit blktrace driver data for a completed FCP
 * command and, when the adapter reports measurement data and the request
 * succeeded, fold the channel/fabric latencies into the per-device
 * read/write/cmd statistics under latencies.lock.
 * NOTE(review): Doxygen listing — two case labels of the data_direction
 * switch (margin 2010-2011 and 2015-2016) were dropped by the renderer.
 */
1985 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1986 {
1987  struct fsf_qual_latency_info *lat_in;
1988  struct latency_cont *lat = NULL;
1989  struct zfcp_scsi_dev *zfcp_sdev;
1990  struct zfcp_blk_drv_data blktrc;
1991  int ticks = req->adapter->timer_ticks;
1992 
1993  lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
1994 
1995  blktrc.flags = 0;
1996  blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
1997  if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1998  blktrc.flags |= ZFCP_BLK_REQ_ERROR;
1999  blktrc.inb_usage = 0;
2000  blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2001 
2002  if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2003  !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2004  zfcp_sdev = sdev_to_zfcp(scsi->device);
2005  blktrc.flags |= ZFCP_BLK_LAT_VALID;
2006  blktrc.channel_lat = lat_in->channel_lat * ticks;
2007  blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2008 
2009  switch (req->qtcb->bottom.io.data_direction) {
2012  case FSF_DATADIR_READ:
2013  lat = &zfcp_sdev->latencies.read;
2014  break;
2017  case FSF_DATADIR_WRITE:
2018  lat = &zfcp_sdev->latencies.write;
2019  break;
2020  case FSF_DATADIR_CMND:
2021  lat = &zfcp_sdev->latencies.cmd;
2022  break;
2023  }
2024 
2025  if (lat) {
2026  spin_lock(&zfcp_sdev->latencies.lock);
2027  zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2028  zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2029  lat->counter++;
2030  spin_unlock(&zfcp_sdev->latencies.lock);
2031  }
2032  }
2033 
2034  blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2035  sizeof(blktrc));
2036 }
2037 
/*
 * zfcp_fsf_fcp_handler_common - fsf_status evaluation shared by the FCP
 * command and task-management response handlers: triggers adapter/port
 * recovery, adapter shutdown on malformed requests, or a link test, as
 * the status demands.
 * NOTE(review): Doxygen listing — several case labels and status-flag
 * updates were dropped by the renderer (gaps in margin numbers).
 */
2038 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
2039 {
2040  struct scsi_cmnd *scmnd = req->data;
2041  struct scsi_device *sdev = scmnd->device;
2042  struct zfcp_scsi_dev *zfcp_sdev;
2043  struct fsf_qtcb_header *header = &req->qtcb->header;
2044 
2046  return;
2047 
2048  zfcp_sdev = sdev_to_zfcp(sdev);
2049 
2050  switch (header->fsf_status) {
2051  case FSF_HANDLE_MISMATCH:
2053  zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
2055  break;
2056  case FSF_FCPLUN_NOT_VALID:
2058  zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2060  break;
2062  zfcp_fsf_class_not_supp(req);
2063  break;
2064  case FSF_ACCESS_DENIED:
2065  zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
2067  break;
2069  dev_err(&req->adapter->ccw_device->dev,
2070  "Incorrect direction %d, LUN 0x%016Lx on port "
2071  "0x%016Lx closed\n",
2072  req->qtcb->bottom.io.data_direction,
2073  (unsigned long long)zfcp_scsi_dev_lun(sdev),
2074  (unsigned long long)zfcp_sdev->port->wwpn);
2075  zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2076  "fssfch3");
2078  break;
2080  dev_err(&req->adapter->ccw_device->dev,
2081  "Incorrect CDB length %d, LUN 0x%016Lx on "
2082  "port 0x%016Lx closed\n",
2083  req->qtcb->bottom.io.fcp_cmnd_length,
2084  (unsigned long long)zfcp_scsi_dev_lun(sdev),
2085  (unsigned long long)zfcp_sdev->port->wwpn);
2086  zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2087  "fssfch4");
2089  break;
2090  case FSF_PORT_BOXED:
2091  zfcp_erp_set_port_status(zfcp_sdev->port,
2093  zfcp_erp_port_reopen(zfcp_sdev->port,
2094  ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2096  break;
2097  case FSF_LUN_BOXED:
2100  "fssfch6");
2102  break;
2104  if (header->fsf_status_qual.word[0] ==
2106  zfcp_fc_test_link(zfcp_sdev->port);
2108  break;
2109  }
2110 }
2111 
/*
 * zfcp_fsf_fcp_cmnd_handler - complete an FCP command: map FSF/DIF errors
 * to SCSI host bytes, evaluate the FCP response, trace the request and
 * call scsi_done.  The adapter's abort_lock is held across scsi_done so
 * the completion cannot race with an abort of the same command (see the
 * comment at the bottom).
 * NOTE(review): Doxygen listing — the error-check condition (2128) and
 * several DIF case labels were dropped by the renderer.
 */
2112 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2113 {
2114  struct scsi_cmnd *scpnt;
2115  struct fcp_resp_with_ext *fcp_rsp;
2116  unsigned long flags;
2117 
2118  read_lock_irqsave(&req->adapter->abort_lock, flags);
2119 
2120  scpnt = req->data;
2121  if (unlikely(!scpnt)) {
2122  read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2123  return;
2124  }
2125 
2126  zfcp_fsf_fcp_handler_common(req);
2127 
2129  set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2130  goto skip_fsfstatus;
2131  }
2132 
2133  switch (req->qtcb->header.fsf_status) {
2135  case FSF_INVALID_PROT_PARM:
2136  set_host_byte(scpnt, DID_ERROR);
2137  goto skip_fsfstatus;
2139  zfcp_scsi_dif_sense_error(scpnt, 0x1);
2140  goto skip_fsfstatus;
2142  zfcp_scsi_dif_sense_error(scpnt, 0x2);
2143  goto skip_fsfstatus;
2145  zfcp_scsi_dif_sense_error(scpnt, 0x3);
2146  goto skip_fsfstatus;
2147  }
2148  fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2149  zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2150 
2151 skip_fsfstatus:
2152  zfcp_fsf_req_trace(req, scpnt);
2153  zfcp_dbf_scsi_result(scpnt, req);
2154 
2155  scpnt->host_scribble = NULL;
2156  (scpnt->scsi_done) (scpnt);
2157  /*
2158  * We must hold this lock until scsi_done has been called.
2159  * Otherwise we may call scsi_done after abort regarding this
2160  * command has completed.
2161  * Note: scsi_done must not block!
2162  */
2163  read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2164 }
2165 
2166 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2167 {
2168  switch (scsi_get_prot_op(scsi_cmnd)) {
2169  case SCSI_PROT_NORMAL:
2170  switch (scsi_cmnd->sc_data_direction) {
2171  case DMA_NONE:
2172  *data_dir = FSF_DATADIR_CMND;
2173  break;
2174  case DMA_FROM_DEVICE:
2175  *data_dir = FSF_DATADIR_READ;
2176  break;
2177  case DMA_TO_DEVICE:
2178  *data_dir = FSF_DATADIR_WRITE;
2179  break;
2180  case DMA_BIDIRECTIONAL:
2181  return -EINVAL;
2182  }
2183  break;
2184 
2185  case SCSI_PROT_READ_STRIP:
2186  *data_dir = FSF_DATADIR_DIF_READ_STRIP;
2187  break;
2189  *data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2190  break;
2191  case SCSI_PROT_READ_PASS:
2192  *data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2193  break;
2194  case SCSI_PROT_WRITE_PASS:
2195  *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2196  break;
2197  default:
2198  return -EINVAL;
2199  }
2200 
2201  return 0;
2202 }
2203 
/*
 * zfcp_fsf_fcp_cmnd - build and send an FSF request for one SCSI command:
 * picks the SBAL type from the DMA direction, fills the QTCB io bottom
 * (handles, service class, DIF block/ref-tag data), maps the protection
 * and data scatter-gather lists into SBALs and sends the request.
 * Returns 0 on success, -EBUSY if the LUN is not usable, -EIO/PTR_ERR on
 * setup failure.
 * NOTE(review): Doxygen listing — a few lines (status check at 2222,
 * margin 2245, 2251) were dropped by the renderer.
 */
2208 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2209 {
2210  struct zfcp_fsf_req *req;
2211  struct fcp_cmnd *fcp_cmnd;
2212  u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2213  int retval = -EIO;
2214  struct scsi_device *sdev = scsi_cmnd->device;
2215  struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2216  struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2217  struct zfcp_qdio *qdio = adapter->qdio;
2218  struct fsf_qtcb_bottom_io *io;
2219  unsigned long flags;
2220 
2221  if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2223  return -EBUSY;
2224 
2225  spin_lock_irqsave(&qdio->req_q_lock, flags);
2226  if (atomic_read(&qdio->req_q_free) <= 0) {
2227  atomic_inc(&qdio->req_q_full);
2228  goto out;
2229  }
2230 
2231  if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2232  sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2233 
2234  req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2235  sbtype, adapter->pool.scsi_req);
2236 
2237  if (IS_ERR(req)) {
2238  retval = PTR_ERR(req);
2239  goto out;
2240  }
2241 
2242  scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2243 
2244  io = &req->qtcb->bottom.io;
2246  req->data = scsi_cmnd;
2247  req->handler = zfcp_fsf_fcp_cmnd_handler;
2248  req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2249  req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2250  io->service_class = FSF_CLASS_3;
2252 
2253  if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2254  io->data_block_length = scsi_cmnd->device->sector_size;
2255  io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2256  }
2257 
2258  if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2259  goto failed_scsi_cmnd;
2260 
2261  fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2262  zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
2263 
2264  if (scsi_prot_sg_count(scsi_cmnd)) {
2265  zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2266  scsi_prot_sg_count(scsi_cmnd));
2267  retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2268  scsi_prot_sglist(scsi_cmnd));
2269  if (retval)
2270  goto failed_scsi_cmnd;
2271  io->prot_data_length = zfcp_qdio_real_bytes(
2272  scsi_prot_sglist(scsi_cmnd));
2273  }
2274 
2275  retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2276  scsi_sglist(scsi_cmnd));
2277  if (unlikely(retval))
2278  goto failed_scsi_cmnd;
2279 
2280  zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2281  if (zfcp_adapter_multi_buffer_active(adapter))
2282  zfcp_qdio_set_scount(qdio, &req->qdio_req);
2283 
2284  retval = zfcp_fsf_req_send(req);
2285  if (unlikely(retval))
2286  goto failed_scsi_cmnd;
2287 
2288  goto out;
2289 
2290 failed_scsi_cmnd:
2291  zfcp_fsf_req_free(req);
2292  scsi_cmnd->host_scribble = NULL;
2293 out:
2294  spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2295  return retval;
2296 }
2297 
/*
 * zfcp_fsf_fcp_task_mgmt_handler - evaluate a task-management response:
 * the RSP_INFO code following the FCP response decides success; the
 * consequence of a non-FCP_TMF_CMPL code is on dropped margin lines
 * 2309-2310 (presumably setting ZFCP_STATUS_FSFREQ_TMFUNCFAILED — TODO
 * confirm against upstream).
 */
2298 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2299 {
2300  struct fcp_resp_with_ext *fcp_rsp;
2301  struct fcp_resp_rsp_info *rsp_info;
2302 
2303  zfcp_fsf_fcp_handler_common(req);
2304 
2305  fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2306  rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2307 
2308  if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2311 }
2312 
2319 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2320  u8 tm_flags)
2321 {
2322  struct zfcp_fsf_req *req = NULL;
2323  struct fcp_cmnd *fcp_cmnd;
2324  struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
2325  struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2326 
2327  if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2329  return NULL;
2330 
2331  spin_lock_irq(&qdio->req_q_lock);
2332  if (zfcp_qdio_sbal_get(qdio))
2333  goto out;
2334 
2335  req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2337  qdio->adapter->pool.scsi_req);
2338 
2339  if (IS_ERR(req)) {
2340  req = NULL;
2341  goto out;
2342  }
2343 
2344  req->data = scmnd;
2345  req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2346  req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2347  req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2348  req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2349  req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2350  req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2351 
2352  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2353 
2354  fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2355  zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
2356 
2357  zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2358  if (!zfcp_fsf_req_send(req))
2359  goto out;
2360 
2361  zfcp_fsf_req_free(req);
2362  req = NULL;
2363 out:
2364  spin_unlock_irq(&qdio->req_q_lock);
2365  return req;
2366 }
2367 
/* Completion handler for CFDC control-file requests: intentionally empty,
 * nothing to do on completion here. */
2368 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2369 {
2370 }
2371 
/*
 * zfcp_fsf_control_file - upload or download the CFDC access-control file:
 * picks the SBAL direction from fsf_cfdc->command, maps fsf_cfdc->sg
 * (which must total ZFCP_CFDC_MAX_SIZE bytes) into SBALs and sends the
 * request.  Returns the request on success, ERR_PTR otherwise.
 * NOTE(review): Doxygen listing — the first signature line (2378), the
 * bottom declaration (2383), the command case labels (2391, 2394) and two
 * more lines (2414, 2435) were dropped by the renderer.
 */
2379  struct zfcp_fsf_cfdc *fsf_cfdc)
2380 {
2381  struct zfcp_qdio *qdio = adapter->qdio;
2382  struct zfcp_fsf_req *req = NULL;
2384  int retval = -EIO;
2385  u8 direction;
2386 
2387  if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2388  return ERR_PTR(-EOPNOTSUPP);
2389 
2390  switch (fsf_cfdc->command) {
2392  direction = SBAL_SFLAGS0_TYPE_WRITE;
2393  break;
2395  direction = SBAL_SFLAGS0_TYPE_READ;
2396  break;
2397  default:
2398  return ERR_PTR(-EINVAL);
2399  }
2400 
2401  spin_lock_irq(&qdio->req_q_lock);
2402  if (zfcp_qdio_sbal_get(qdio))
2403  goto out;
2404 
2405  req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
2406  if (IS_ERR(req)) {
2407  retval = -EPERM;
2408  goto out;
2409  }
2410 
2411  req->handler = zfcp_fsf_control_file_handler;
2412 
2413  bottom = &req->qtcb->bottom.support;
2415  bottom->option = fsf_cfdc->option;
2416 
2417  retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
2418 
2419  if (retval ||
2420  (zfcp_qdio_real_bytes(fsf_cfdc->sg) != ZFCP_CFDC_MAX_SIZE)) {
2421  zfcp_fsf_req_free(req);
2422  retval = -EIO;
2423  goto out;
2424  }
2425  zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2426  if (zfcp_adapter_multi_buffer_active(adapter))
2427  zfcp_qdio_set_scount(qdio, &req->qdio_req);
2428 
2429  zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2430  retval = zfcp_fsf_req_send(req);
2431 out:
2432  spin_unlock_irq(&qdio->req_q_lock);
2433 
2434  if (!retval) {
2436  return req;
2437  }
2438  return ERR_PTR(retval);
2439 }
2440 
2446 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2447 {
2448  struct zfcp_adapter *adapter = qdio->adapter;
2449  struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2450  struct qdio_buffer_element *sbale;
2451  struct zfcp_fsf_req *fsf_req;
2452  unsigned long req_id;
2453  int idx;
2454 
2455  for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2456 
2457  sbale = &sbal->element[idx];
2458  req_id = (unsigned long) sbale->addr;
2459  fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2460 
2461  if (!fsf_req) {
2462  /*
2463  * Unknown request means that we have potentially memory
2464  * corruption and must stop the machine immediately.
2465  */
2466  zfcp_qdio_siosl(adapter);
2467  panic("error: unknown req_id (%lx) on adapter %s.\n",
2468  req_id, dev_name(&adapter->ccw_device->dev));
2469  }
2470 
2471  fsf_req->qdio_req.sbal_response = sbal_idx;
2472  zfcp_fsf_req_complete(fsf_req);
2473 
2474  if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2475  break;
2476  }
2477 }
2478 
/*
 * Look up (without removing) the request whose id is stored in the first
 * element of @sbal.
 * NOTE(review): Doxygen listing — the first signature line (2479) was
 * dropped by the renderer.
 */
2480 struct qdio_buffer *sbal)
2481 {
2482  struct qdio_buffer_element *sbale = &sbal->element[0];
2483  u64 req_id = (unsigned long) sbale->addr;
2484 
2485  return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
2486 }