request.c — Linux kernel 3.7.1, drivers/scsi/isci
1 /*
2  * This file is provided under a dual BSD/GPLv2 license. When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  * * Redistributions of source code must retain the above copyright
34  * notice, this list of conditions and the following disclaimer.
35  * * Redistributions in binary form must reproduce the above copyright
36  * notice, this list of conditions and the following disclaimer in
37  * the documentation and/or other materials provided with the
38  * distribution.
39  * * Neither the name of Intel Corporation nor the names of its
40  * contributors may be used to endorse or promote products derived
41  * from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55 
56 #include <scsi/scsi_cmnd.h>
57 #include "isci.h"
58 #include "task.h"
59 #include "request.h"
60 #include "scu_completion_codes.h"
61 #include "scu_event_codes.h"
62 #include "sas.h"
63 
64 #undef C
65 #define C(a) (#a)
66 const char *req_state_name(enum sci_base_request_states state)
67 {
68  static const char * const strings[] = REQUEST_STATES;
69 
70  return strings[state];
71 }
72 #undef C
73 
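The C() macro above is the stringification half of an X-macro pattern: REQUEST_STATES (defined in request.h) is a single list that is expanded once with C(a) = a to declare the enum and once with C(a) = (#a) to build the matching name table. A minimal, self-contained sketch of the same trick, with illustrative state names rather than the driver's:

#include <stdio.h>

#define DEMO_STATES { \
	C(DEMO_CONSTRUCTED), \
	C(DEMO_STARTED), \
	C(DEMO_COMPLETED), \
}

#undef C
#define C(a) a
enum demo_state DEMO_STATES;	/* expands to the enumerators */
#undef C

#define C(a) (#a)
static const char * const demo_state_names[] = DEMO_STATES; /* same list, as strings */
#undef C

int main(void)
{
	printf("%s\n", demo_state_names[DEMO_STARTED]); /* prints "DEMO_STARTED" */
	return 0;
}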
74 static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
75  int idx)
76 {
77  if (idx == 0)
78  return &ireq->tc->sgl_pair_ab;
79  else if (idx == 1)
80  return &ireq->tc->sgl_pair_cd;
81  else if (idx < 0)
82  return NULL;
83  else
84  return &ireq->sg_table[idx - 2];
85 }
86 
87 static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
88  struct isci_request *ireq, u32 idx)
89 {
90  u32 offset;
91 
92  if (idx == 0) {
93  offset = (void *) &ireq->tc->sgl_pair_ab -
94  (void *) &ihost->task_context_table[0];
95  return ihost->tc_dma + offset;
96  } else if (idx == 1) {
97  offset = (void *) &ireq->tc->sgl_pair_cd -
98  (void *) &ihost->task_context_table[0];
99  return ihost->tc_dma + offset;
100  }
101 
102  return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
103 }
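The two fixed pairs live inside the task context, which is part of one large coherent allocation; their bus address is therefore recovered by rebasing the member's CPU-pointer offset onto the allocation's DMA base, while overflow pairs come from the request's own buffer via sci_io_request_get_dma_addr(). A small userspace sketch of the rebasing arithmetic (the typedef is a stand-in for the kernel type):

#include <stdint.h>

typedef uint64_t dma_addr_t; /* stand-in for the kernel typedef */

/* bus address of 'member' = DMA base + the member's byte offset from the
 * CPU base of the same allocation (valid only within one coherent area)
 */
static dma_addr_t member_to_dma(void *cpu_base, dma_addr_t dma_base,
				void *member)
{
	return dma_base + (dma_addr_t)((char *)member - (char *)cpu_base);
}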
104 
105 static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
106 {
107  e->length = sg_dma_len(sg);
108  e->address_upper = upper_32_bits(sg_dma_address(sg));
109  e->address_lower = lower_32_bits(sg_dma_address(sg));
110  e->address_modifier = 0;
111 }
112 
113 static void sci_request_build_sgl(struct isci_request *ireq)
114 {
115  struct isci_host *ihost = ireq->isci_host;
116  struct sas_task *task = isci_request_access_task(ireq);
117  struct scatterlist *sg = NULL;
118  dma_addr_t dma_addr;
119  u32 sg_idx = 0;
120  struct scu_sgl_element_pair *scu_sg = NULL;
121  struct scu_sgl_element_pair *prev_sg = NULL;
122 
123  if (task->num_scatter > 0) {
124  sg = task->scatter;
125 
126  while (sg) {
127  scu_sg = to_sgl_element_pair(ireq, sg_idx);
128  init_sgl_element(&scu_sg->A, sg);
129  sg = sg_next(sg);
130  if (sg) {
131  init_sgl_element(&scu_sg->B, sg);
132  sg = sg_next(sg);
133  } else
134  memset(&scu_sg->B, 0, sizeof(scu_sg->B));
135 
136  if (prev_sg) {
137  dma_addr = to_sgl_element_pair_dma(ihost,
138  ireq,
139  sg_idx);
140 
141  prev_sg->next_pair_upper =
142  upper_32_bits(dma_addr);
143  prev_sg->next_pair_lower =
144  lower_32_bits(dma_addr);
145  }
146 
147  prev_sg = scu_sg;
148  sg_idx++;
149  }
150  } else { /* handle when no sg */
151  scu_sg = to_sgl_element_pair(ireq, sg_idx);
152 
153  dma_addr = dma_map_single(&ihost->pdev->dev,
154  task->scatter,
155  task->total_xfer_len,
156  task->data_dir);
157 
158  ireq->zero_scatter_daddr = dma_addr;
159 
160  scu_sg->A.length = task->total_xfer_len;
161  scu_sg->A.address_upper = upper_32_bits(dma_addr);
162  scu_sg->A.address_lower = lower_32_bits(dma_addr);
163  }
164 
165  if (scu_sg) {
166  scu_sg->next_pair_upper = 0;
167  scu_sg->next_pair_lower = 0;
168  }
169 }
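Conceptually the loop above consumes the scatterlist two entries at a time: element A of each pair, then element B (zeroed for an odd tail), back-patching the previous pair's next_pair link once the current pair's bus address is known, and finally terminating the last pair. A compact userspace sketch of that pairing, using toy structs rather than the hardware layout:

#include <stdint.h>
#include <string.h>

struct toy_elem { uint32_t lo, hi, len; };
struct toy_pair { struct toy_elem A, B; uint64_t next; };

/* pack nseg (addr, len) segments into ceil(nseg/2) chained pairs */
static void toy_build_pairs(struct toy_pair *p, const uint64_t *addr,
			    const uint32_t *len, int nseg)
{
	struct toy_pair *prev = NULL;
	int i;

	for (i = 0; i < nseg; i += 2, p++) {
		p->A = (struct toy_elem){ (uint32_t)addr[i],
					  (uint32_t)(addr[i] >> 32), len[i] };
		if (i + 1 < nseg)
			p->B = (struct toy_elem){ (uint32_t)addr[i + 1],
						  (uint32_t)(addr[i + 1] >> 32),
						  len[i + 1] };
		else
			memset(&p->B, 0, sizeof(p->B)); /* odd tail: clear B */
		p->next = 0;				/* terminate for now */
		if (prev)
			prev->next = (uint64_t)(uintptr_t)p; /* back-patch link */
		prev = p;
	}
}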
170 
171 static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
172 {
173  struct ssp_cmd_iu *cmd_iu;
174  struct sas_task *task = isci_request_access_task(ireq);
175 
176  cmd_iu = &ireq->ssp.cmd;
177 
178  memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
179  cmd_iu->add_cdb_len = 0;
180  cmd_iu->_r_a = 0;
181  cmd_iu->_r_b = 0;
182  cmd_iu->en_fburst = 0; /* unsupported */
183  cmd_iu->task_prio = task->ssp_task.task_prio;
184  cmd_iu->task_attr = task->ssp_task.task_attr;
185  cmd_iu->_r_c = 0;
186 
187  sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
188  sizeof(task->ssp_task.cdb) / sizeof(u32));
189 }
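sci_swab32_cpy() byte-swaps each 32-bit word as it copies, which is how the little-endian host produces the big-endian CDB bytes the SSP frame requires; it walks downwards, which mirrors the kernel helper and keeps the in-place calls (dst == src) used elsewhere in this file well behaved. A userspace sketch of the same helper:

#include <stdint.h>

/* copy 'words' u32s from src to dst, byte-swapping each word */
static void toy_swab32_cpy(uint32_t *dst, const uint32_t *src, int words)
{
	while (--words >= 0) {
		uint32_t v = src[words];

		dst[words] = (v >> 24) | ((v >> 8) & 0x0000ff00u) |
			     ((v << 8) & 0x00ff0000u) | (v << 24);
	}
}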
190 
191 static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
192 {
193  struct ssp_task_iu *task_iu;
194  struct sas_task *task = isci_request_access_task(ireq);
195  struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
196 
197  task_iu = &ireq->ssp.tmf;
198 
199  memset(task_iu, 0, sizeof(struct ssp_task_iu));
200 
201  memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
202 
203  task_iu->task_func = isci_tmf->tmf_code;
204  task_iu->task_tag =
205  (test_bit(IREQ_TMF, &ireq->flags)) ?
206  isci_tmf->io_tag :
207  SCI_CONTROLLER_INVALID_IO_TAG;
208 }
209 
216 static void scu_ssp_reqeust_construct_task_context(
217  struct isci_request *ireq,
218  struct scu_task_context *task_context)
219 {
220  dma_addr_t dma_addr;
221  struct isci_remote_device *idev;
222  struct isci_port *iport;
223 
224  idev = ireq->target_device;
225  iport = idev->owning_port;
226 
227  /* Fill in the TC with its required data */
228  task_context->abort = 0;
229  task_context->priority = 0;
230  task_context->initiator_request = 1;
231  task_context->connection_rate = idev->connection_rate;
232  task_context->protocol_engine_index = ISCI_PEG;
233  task_context->logical_port_index = iport->physical_port_index;
234  task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
235  task_context->valid = SCU_TASK_CONTEXT_VALID;
236  task_context->context_type = SCU_TASK_CONTEXT_TYPE;
237 
238  task_context->remote_node_index = idev->rnc.remote_node_index;
239  task_context->command_code = 0;
240 
241  task_context->link_layer_control = 0;
242  task_context->do_not_dma_ssp_good_response = 1;
243  task_context->strict_ordering = 0;
244  task_context->control_frame = 0;
245  task_context->timeout_enable = 0;
246  task_context->block_guard_enable = 0;
247 
248  task_context->address_modifier = 0;
249 
250  /* task_context->type.ssp.tag = ireq->io_tag; */
251  task_context->task_phase = 0x01;
252 
253  ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
254  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
255  (iport->physical_port_index <<
256  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
257  ISCI_TAG_TCI(ireq->io_tag));
258 
259  /*
260  * Copy the physical address for the command buffer to the
261  * SCU Task Context
262  */
263  dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
264 
265  task_context->command_iu_upper = upper_32_bits(dma_addr);
266  task_context->command_iu_lower = lower_32_bits(dma_addr);
267 
268  /*
269  * Copy the physical address for the response buffer to the
270  * SCU Task Context
271  */
272  dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
273 
274  task_context->response_iu_upper = upper_32_bits(dma_addr);
275  task_context->response_iu_lower = lower_32_bits(dma_addr);
276 }
277 
278 static u8 scu_bg_blk_size(struct scsi_device *sdp)
279 {
280  switch (sdp->sector_size) {
281  case 512:
282  return 0;
283  case 1024:
284  return 1;
285  case 4096:
286  return 3;
287  default:
288  return 0xff;
289  }
290 }
291 
292 static u32 scu_dif_bytes(u32 len, u32 sector_size)
293 {
294  return (len >> ilog2(sector_size)) * 8;
295 }
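Each protected sector carries an 8-byte DIF tuple (guard, application tag, reference tag), so the helper scales the transfer by 8 bytes per sector; for example, 4096 bytes of 512-byte sectors adds (4096 >> 9) * 8 = 64 bytes. The same arithmetic stands alone as:

#include <stdint.h>

/* DIF overhead = (number of sectors) * 8 bytes of protection info;
 * sector_shift = ilog2(sector_size), e.g. 9 for 512-byte sectors
 */
static uint32_t toy_dif_bytes(uint32_t len, uint32_t sector_shift)
{
	return (len >> sector_shift) * 8;
}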
296 
297 static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
298 {
299  struct scu_task_context *tc = ireq->tc;
300  struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
301  u8 blk_sz = scu_bg_blk_size(scmd->device);
302 
303  tc->block_guard_enable = 1;
304  tc->blk_prot_en = 1;
305  tc->blk_sz = blk_sz;
306  /* DIF write insert */
307  tc->blk_prot_func = 0x2;
308 
309  tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
310  scmd->device->sector_size);
311 
312  /* always init to 0, used by hw */
313  tc->interm_crc_val = 0;
314 
315  tc->init_crc_seed = 0;
316  tc->app_tag_verify = 0;
317  tc->app_tag_gen = 0;
318  tc->ref_tag_seed_verify = 0;
319 
320  /* always init to same as bg_blk_sz */
321  tc->UD_bytes_immed_val = scmd->device->sector_size;
322 
323  tc->reserved_DC_0 = 0;
324 
325  /* always init to 8 */
326  tc->DIF_bytes_immed_val = 8;
327 
328  tc->reserved_DC_1 = 0;
329  tc->bgc_blk_sz = scmd->device->sector_size;
330  tc->reserved_E0_0 = 0;
331  tc->app_tag_gen_mask = 0;
332 
333  /** setup block guard control **/
334  tc->bgctl = 0;
335 
336  /* DIF write insert */
337  tc->bgctl_f.op = 0x2;
338 
339  tc->app_tag_verify_mask = 0;
340 
341  /* must init to 0 for hw */
342  tc->blk_guard_err = 0;
343 
344  tc->reserved_E8_0 = 0;
345 
346  if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
347  tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
348  else if (type & SCSI_PROT_DIF_TYPE3)
349  tc->ref_tag_seed_gen = 0;
350 }
351 
352 static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
353 {
354  struct scu_task_context *tc = ireq->tc;
355  struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
356  u8 blk_sz = scu_bg_blk_size(scmd->device);
357 
358  tc->block_guard_enable = 1;
359  tc->blk_prot_en = 1;
360  tc->blk_sz = blk_sz;
361  /* DIF read strip */
362  tc->blk_prot_func = 0x1;
363 
364  tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
365  scmd->device->sector_size);
366 
367  /* always init to 0, used by hw */
368  tc->interm_crc_val = 0;
369 
370  tc->init_crc_seed = 0;
371  tc->app_tag_verify = 0;
372  tc->app_tag_gen = 0;
373 
374  if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
375  tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
376  else if (type & SCSI_PROT_DIF_TYPE3)
377  tc->ref_tag_seed_verify = 0;
378 
379  /* always init to same as bg_blk_sz */
380  tc->UD_bytes_immed_val = scmd->device->sector_size;
381 
382  tc->reserved_DC_0 = 0;
383 
384  /* always init to 8 */
385  tc->DIF_bytes_immed_val = 8;
386 
387  tc->reserved_DC_1 = 0;
388  tc->bgc_blk_sz = scmd->device->sector_size;
389  tc->reserved_E0_0 = 0;
390  tc->app_tag_gen_mask = 0;
391 
392  /** setup block guard control **/
393  tc->bgctl = 0;
394 
395  /* DIF read strip */
396  tc->bgctl_f.crc_verify = 1;
397  tc->bgctl_f.op = 0x1;
398  if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
399  tc->bgctl_f.ref_tag_chk = 1;
400  tc->bgctl_f.app_f_detect = 1;
401  } else if (type & SCSI_PROT_DIF_TYPE3)
402  tc->bgctl_f.app_ref_f_detect = 1;
403 
404  tc->app_tag_verify_mask = 0;
405 
406  /* must init to 0 for hw */
407  tc->blk_guard_err = 0;
408 
409  tc->reserved_E8_0 = 0;
410  tc->ref_tag_seed_gen = 0;
411 }
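Both the insert and strip paths seed the reference tag the same way: DIF Types 1 and 2 start the incrementing reference tag at the command's lower 32 LBA bits, while Type 3 carries no meaningful reference tag. As a stand-alone helper, with illustrative constants mirroring the SCSI_PROT_DIF_TYPE* flags used above:

#include <stdint.h>

#define TOY_DIF_TYPE1 (1 << 0)
#define TOY_DIF_TYPE2 (1 << 1)
#define TOY_DIF_TYPE3 (1 << 2)

static uint32_t toy_ref_tag_seed(uint8_t type, uint64_t lba)
{
	if (type & (TOY_DIF_TYPE1 | TOY_DIF_TYPE2))
		return (uint32_t)(lba & 0xffffffff); /* seed from the LBA */
	return 0; /* Type 3: no reference-tag seeding */
}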
412 
418 static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
419  enum dma_data_direction dir,
420  u32 len)
421 {
422  struct scu_task_context *task_context = ireq->tc;
423  struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
424  struct scsi_cmnd *scmd = sas_task->uldd_task;
425  u8 prot_type = scsi_get_prot_type(scmd);
426  u8 prot_op = scsi_get_prot_op(scmd);
427 
428  scu_ssp_reqeust_construct_task_context(ireq, task_context);
429 
430  task_context->ssp_command_iu_length =
431  sizeof(struct ssp_cmd_iu) / sizeof(u32);
432  task_context->type.ssp.frame_type = SSP_COMMAND;
433 
434  switch (dir) {
435  case DMA_FROM_DEVICE:
436  case DMA_NONE:
437  default:
438  task_context->task_type = SCU_TASK_TYPE_IOREAD;
439  break;
440  case DMA_TO_DEVICE:
441  task_context->task_type = SCU_TASK_TYPE_IOWRITE;
442  break;
443  }
444 
445  task_context->transfer_length_bytes = len;
446 
447  if (task_context->transfer_length_bytes > 0)
448  sci_request_build_sgl(ireq);
449 
450  if (prot_type != SCSI_PROT_DIF_TYPE0) {
451  if (prot_op == SCSI_PROT_READ_STRIP)
452  scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
453  else if (prot_op == SCSI_PROT_WRITE_INSERT)
454  scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
455  }
456 }
457 
471 static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
472 {
473  struct scu_task_context *task_context = ireq->tc;
474 
475  scu_ssp_reqeust_construct_task_context(ireq, task_context);
476 
477  task_context->control_frame = 1;
478  task_context->priority = SCU_TASK_PRIORITY_HIGH;
479  task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
480  task_context->transfer_length_bytes = 0;
481  task_context->type.ssp.frame_type = SSP_TASK;
482  task_context->ssp_command_iu_length =
483  sizeof(struct ssp_task_iu) / sizeof(u32);
484 }
485 
498 static void scu_sata_reqeust_construct_task_context(
499  struct isci_request *ireq,
500  struct scu_task_context *task_context)
501 {
502  dma_addr_t dma_addr;
503  struct isci_remote_device *idev;
504  struct isci_port *iport;
505 
506  idev = ireq->target_device;
507  iport = idev->owning_port;
508 
509  /* Fill in the TC with its required data */
510  task_context->abort = 0;
511  task_context->priority = SCU_TASK_PRIORITY_NORMAL;
512  task_context->initiator_request = 1;
513  task_context->connection_rate = idev->connection_rate;
514  task_context->protocol_engine_index = ISCI_PEG;
515  task_context->logical_port_index = iport->physical_port_index;
516  task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
517  task_context->valid = SCU_TASK_CONTEXT_VALID;
518  task_context->context_type = SCU_TASK_CONTEXT_TYPE;
519 
520  task_context->remote_node_index = idev->rnc.remote_node_index;
521  task_context->command_code = 0;
522 
523  task_context->link_layer_control = 0;
524  task_context->do_not_dma_ssp_good_response = 1;
525  task_context->strict_ordering = 0;
526  task_context->control_frame = 0;
527  task_context->timeout_enable = 0;
528  task_context->block_guard_enable = 0;
529 
530  task_context->address_modifier = 0;
531  task_context->task_phase = 0x01;
532 
533  task_context->ssp_command_iu_length =
534  (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
535 
536  /* Set the first word of the H2D REG FIS */
537  task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
538 
539  ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
540  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
541  (iport->physical_port_index <<
542  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
543  ISCI_TAG_TCI(ireq->io_tag));
544  /*
545  * Copy the physical address for the command buffer to the SCU Task
546  * Context. We must offset the command buffer by 4 bytes because the
547  * first 4 bytes are transferred in the body of the TC.
548  */
549  dma_addr = sci_io_request_get_dma_addr(ireq,
550  ((char *) &ireq->stp.cmd) +
551  sizeof(u32));
552 
553  task_context->command_iu_upper = upper_32_bits(dma_addr);
554  task_context->command_iu_lower = lower_32_bits(dma_addr);
555 
556  /* SATA Requests do not have a response buffer */
557  task_context->response_iu_upper = 0;
558  task_context->response_iu_lower = 0;
559 }
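A host-to-device register FIS is 20 bytes, and the comment above explains the split: dword 0 travels inside the task context itself (type.words[0]) while the remaining 16 bytes are fetched through the command IU pointer, hence the sizeof(u32) offset. A toy illustration of that split (names are not the driver's):

#include <stdint.h>
#include <string.h>

struct toy_fis { uint8_t bytes[20]; }; /* 20-byte H2D register FIS */

struct toy_tc_view {
	uint32_t embedded_word0;	/* rides in the task context */
	const uint8_t *dma_part;	/* what the command IU pointer covers */
	uint32_t dma_len;
};

static struct toy_tc_view toy_split_fis(const struct toy_fis *fis)
{
	struct toy_tc_view v;

	memcpy(&v.embedded_word0, fis->bytes, sizeof(uint32_t));
	v.dma_part = fis->bytes + sizeof(uint32_t);	/* skip dword 0 */
	v.dma_len = sizeof(fis->bytes) - sizeof(uint32_t); /* 16 bytes */
	return v;
}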
560 
561 static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
562 {
563  struct scu_task_context *task_context = ireq->tc;
564 
565  scu_sata_reqeust_construct_task_context(ireq, task_context);
566 
567  task_context->control_frame = 0;
568  task_context->priority = SCU_TASK_PRIORITY_NORMAL;
569  task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
570  task_context->type.stp.fis_type = FIS_REGH2D;
571  task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
572 }
573 
574 static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
575  bool copy_rx_frame)
576 {
577  struct isci_stp_request *stp_req = &ireq->stp.req;
578 
579  scu_stp_raw_request_construct_task_context(ireq);
580 
581  stp_req->status = 0;
582  stp_req->sgl.offset = 0;
583  stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
584 
585  if (copy_rx_frame) {
586  sci_request_build_sgl(ireq);
587  stp_req->sgl.index = 0;
588  } else {
589  /* The user does not want the data copied to the SGL buffer location */
590  stp_req->sgl.index = -1;
591  }
592 
593  return SCI_SUCCESS;
594 }
595 
608 static void sci_stp_optimized_request_construct(struct isci_request *ireq,
609  u8 optimized_task_type,
610  u32 len,
611  enum dma_data_direction dir)
612 {
613  struct scu_task_context *task_context = ireq->tc;
614 
615  /* Build the STP task context structure */
616  scu_sata_reqeust_construct_task_context(ireq, task_context);
617 
618  /* Copy over the SGL elements */
619  sci_request_build_sgl(ireq);
620 
621  /* Copy over the number of bytes to be transferred */
622  task_context->transfer_length_bytes = len;
623 
624  if (dir == DMA_TO_DEVICE) {
625  /*
626  * The difference between the DMA IN and DMA OUT request task type
627  * values are consistent with the difference between FPDMA READ
628  * and FPDMA WRITE values. Add the supplied task type parameter
629  * to this difference to set the task type properly for this
630  * DATA OUT (WRITE) case. */
631  task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
632  - SCU_TASK_TYPE_DMA_IN);
633  } else {
634  /*
635  * For the DATA IN (READ) case, simply save the supplied
636  * optimized task type. */
637  task_context->task_type = optimized_task_type;
638  }
639 }
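The write variant is derived arithmetically because the SCU task-type encoding keeps each read/write pair the same distance apart, so one constant offset converts either the FPDMA or the plain DMA read type into its write twin. A sketch with illustrative enum values:

/* toy enum mirroring the relationship the comment above relies on:
 * (DMA_OUT - DMA_IN) == (FPDMA_WRITE - FPDMA_READ); values illustrative
 */
enum toy_task_type {
	TOY_DMA_IN      = 0,
	TOY_DMA_OUT     = 1,
	TOY_FPDMA_READ  = 2,
	TOY_FPDMA_WRITE = 3,
};

static enum toy_task_type toy_to_write_variant(enum toy_task_type read_type)
{
	return read_type + (TOY_DMA_OUT - TOY_DMA_IN);
}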
640 
641 static void sci_atapi_construct(struct isci_request *ireq)
642 {
643  struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
644  struct sas_task *task;
645 
646  /* To simplify the implementation we take advantage of the
647  * silicon's partial acceleration of atapi protocol (dma data
648  * transfers), so we promote all commands to dma protocol. This
649  * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
650  */
651  h2d_fis->features |= ATAPI_PKT_DMA;
652 
653  scu_stp_raw_request_construct_task_context(ireq);
654 
655  task = isci_request_access_task(ireq);
656  if (task->data_dir == DMA_NONE)
657  task->total_xfer_len = 0;
658 
659  /* clear the response so we can detect arrival of an
660  * unsolicited h2d fis
661  */
662  ireq->stp.rsp.fis_type = 0;
663 }
664 
665 static enum sci_status
666 sci_io_request_construct_sata(struct isci_request *ireq,
667  u32 len,
668  enum dma_data_direction dir,
669  bool copy)
670 {
671  enum sci_status status = SCI_SUCCESS;
672  struct sas_task *task = isci_request_access_task(ireq);
673  struct domain_device *dev = ireq->target_device->domain_dev;
674 
675  /* check for management protocols */
676  if (test_bit(IREQ_TMF, &ireq->flags)) {
677  struct isci_tmf *tmf = isci_request_access_tmf(ireq);
678 
679  dev_err(&ireq->owning_controller->pdev->dev,
680  "%s: Request 0x%p received un-handled SAT "
681  "management protocol 0x%x.\n",
682  __func__, ireq, tmf->tmf_code);
683 
684  return SCI_FAILURE;
685  }
686 
687  if (!sas_protocol_ata(task->task_proto)) {
688  dev_err(&ireq->owning_controller->pdev->dev,
689  "%s: Non-ATA protocol in SATA path: 0x%x\n",
690  __func__,
691  task->task_proto);
692  return SCI_FAILURE;
693 
694  }
695 
696  /* ATAPI */
697  if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
698  task->ata_task.fis.command == ATA_CMD_PACKET) {
699  sci_atapi_construct(ireq);
700  return SCI_SUCCESS;
701  }
702 
703  /* non data */
704  if (task->data_dir == DMA_NONE) {
705  scu_stp_raw_request_construct_task_context(ireq);
706  return SCI_SUCCESS;
707  }
708 
709  /* NCQ */
710  if (task->ata_task.use_ncq) {
711  sci_stp_optimized_request_construct(ireq,
712  SCU_TASK_TYPE_FPDMAQ_READ,
713  len, dir);
714  return SCI_SUCCESS;
715  }
716 
717  /* DMA */
718  if (task->ata_task.dma_xfer) {
719  sci_stp_optimized_request_construct(ireq,
720  SCU_TASK_TYPE_DMA_IN,
721  len, dir);
722  return SCI_SUCCESS;
723  } else /* PIO */
724  return sci_stp_pio_request_construct(ireq, copy);
725 
726  return status;
727 }
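sci_io_request_construct_sata() above is a pure dispatcher. Restating its decision ladder as a tiny self-contained classifier (illustrative names and flags, not the driver's types):

/* TMF?           -> unsupported here, fail
 * non-ATA proto? -> fail
 * ATAPI PACKET   -> packet protocol
 * no data        -> raw (non-data) frame
 * NCQ            -> FPDMA (queued) path
 * DMA            -> UDMA path
 * otherwise      -> PIO path
 */
enum toy_path { TOY_FAIL, TOY_PACKET, TOY_RAW, TOY_FPDMA, TOY_UDMA, TOY_PIO };

static enum toy_path toy_classify(int is_tmf, int is_ata, int is_packet,
				  int has_data, int use_ncq, int dma_xfer)
{
	if (is_tmf || !is_ata)
		return TOY_FAIL;
	if (is_packet)
		return TOY_PACKET;
	if (!has_data)
		return TOY_RAW;
	if (use_ncq)
		return TOY_FPDMA;
	return dma_xfer ? TOY_UDMA : TOY_PIO;
}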
728 
729 static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
730 {
731  struct sas_task *task = isci_request_access_task(ireq);
732 
733  ireq->protocol = SAS_PROTOCOL_SSP;
734 
735  scu_ssp_io_request_construct_task_context(ireq,
736  task->data_dir,
737  task->total_xfer_len);
738 
739  sci_io_request_build_ssp_command_iu(ireq);
740 
741  sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
742 
743  return SCI_SUCCESS;
744 }
745 
746 enum sci_status sci_task_request_construct_ssp(
747  struct isci_request *ireq)
748 {
749  /* Construct the SSP Task SCU Task Context */
750  scu_ssp_task_request_construct_task_context(ireq);
751 
752  /* Fill in the SSP Task IU */
753  sci_task_request_build_ssp_task_iu(ireq);
754 
755  sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
756 
757  return SCI_SUCCESS;
758 }
759 
760 static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
761 {
762  enum sci_status status;
763  bool copy = false;
764  struct sas_task *task = isci_request_access_task(ireq);
765 
766  ireq->protocol = SAS_PROTOCOL_STP;
767 
768  copy = (task->data_dir == DMA_NONE) ? false : true;
769 
770  status = sci_io_request_construct_sata(ireq,
771  task->total_xfer_len,
772  task->data_dir,
773  copy);
774 
775  if (status == SCI_SUCCESS)
776  sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
777 
778  return status;
779 }
780 
785 #define SCU_TASK_CONTEXT_SRAM 0x200000
786 static u32 sci_req_tx_bytes(struct isci_request *ireq)
787 {
788  struct isci_host *ihost = ireq->owning_controller;
789  u32 ret_val = 0;
790 
791  if (readl(&ihost->smu_registers->address_modifier) == 0) {
792  void __iomem *scu_reg_base = ihost->scu_registers;
793 
794  /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
795  * BAR1 is the scu_registers
796  * 0x20002C = 0x200000 + 0x2c
797  * = start of task context SRAM + offset of (type.ssp.data_offset)
798  * TCi is the io_tag of struct sci_request
799  */
800  ret_val = readl(scu_reg_base +
801  (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
802  ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
803  }
804 
805  return ret_val;
806 }
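The readl() above resolves the comment's "BAR1 + 20002Ch + (256*TCi)" formula: task-context SRAM starts 0x200000 into the SCU register BAR, each context is sizeof(struct scu_task_context) bytes, and 0x2c is the offset of the data_offset field. A toy version of that address arithmetic (sizes illustrative):

#include <stdint.h>
#include <stddef.h>

/* field address = BAR1 + SRAM base + field offset + context size * TCi */
static uintptr_t toy_tc_field_addr(uintptr_t bar1, size_t field_off,
				   size_t tc_size, unsigned int tci)
{
	return bar1 + 0x200000u + field_off + tc_size * (uintptr_t)tci;
}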
807 
808 enum sci_status sci_request_start(struct isci_request *ireq)
809 {
810  enum sci_base_request_states state;
811  struct scu_task_context *tc = ireq->tc;
812  struct isci_host *ihost = ireq->owning_controller;
813 
814  state = ireq->sm.current_state_id;
815  if (state != SCI_REQ_CONSTRUCTED) {
816  dev_warn(&ihost->pdev->dev,
817  "%s: SCIC IO Request requested to start while in wrong "
818  "state %d\n", __func__, state);
819  return SCI_FAILURE_INVALID_STATE;
820  }
821 
822  tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
823 
824  switch (tc->protocol_type) {
825  case SCU_TASK_CONTEXT_PROTOCOL_SMP:
826  case SCU_TASK_CONTEXT_PROTOCOL_SSP:
827  /* SSP/SMP Frame */
828  tc->type.ssp.tag = ireq->io_tag;
829  tc->type.ssp.target_port_transfer_tag = 0xFFFF;
830  break;
831 
832  case SCU_TASK_CONTEXT_PROTOCOL_STP:
833  /* STP/SATA Frame
834  * tc->type.stp.ncq_tag = ireq->ncq_tag;
835  */
836  break;
837 
838  case SCU_TASK_CONTEXT_PROTOCOL_NONE:
839  /* @todo When do we set no protocol type? */
840  break;
841 
842  default:
843  /* This should never happen since we build the IO
844  * requests */
845  break;
846  }
847 
848  /* Add to the post_context the io tag value */
849  ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
850 
851  /* Everything is good go ahead and change state */
852  sci_change_state(&ireq->sm, SCI_REQ_STARTED);
853 
854  return SCI_SUCCESS;
855 }
856 
857 enum sci_status
858 sci_io_request_terminate(struct isci_request *ireq)
859 {
860  enum sci_base_request_states state;
861 
862  state = ireq->sm.current_state_id;
863 
864  switch (state) {
865  case SCI_REQ_CONSTRUCTED:
866  /* Set to make sure no HW terminate posting is done: */
867  set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
868  ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
869  ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
870  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
871  return SCI_SUCCESS;
872  case SCI_REQ_STARTED:
873  case SCI_REQ_TASK_WAIT_TC_COMP:
874  case SCI_REQ_SMP_WAIT_RESP:
875  case SCI_REQ_SMP_WAIT_TC_COMP:
876  case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
877  case SCI_REQ_STP_UDMA_WAIT_D2H:
878  case SCI_REQ_STP_NON_DATA_WAIT_H2D:
879  case SCI_REQ_STP_NON_DATA_WAIT_D2H:
880  case SCI_REQ_STP_PIO_WAIT_H2D:
881  case SCI_REQ_STP_PIO_WAIT_FRAME:
882  case SCI_REQ_STP_PIO_DATA_IN:
883  case SCI_REQ_STP_PIO_DATA_OUT:
884  case SCI_REQ_ATAPI_WAIT_H2D:
885  case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
886  case SCI_REQ_ATAPI_WAIT_D2H:
887  case SCI_REQ_ATAPI_WAIT_TC_COMP:
888  /* Fall through and change state to ABORTING... */
889  case SCI_REQ_TASK_WAIT_TC_RESP:
890  /* The task frame was already confirmed to have been
891  * sent by the SCU HW. Since the state machine is
892  * now only waiting for the task response itself,
893  * abort the request and complete it immediately
894  * and don't wait for the task response.
895  */
896  sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
897  /* Fall through and handle like ABORTING... */
898  case SCI_REQ_ABORTING:
899  if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
900  set_bit(IREQ_PENDING_ABORT, &ireq->flags);
901  else
902  clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
903  /* If the request is only waiting on the remote device
904  * suspension, return SUCCESS so the caller will wait too.
905  */
906  return SCI_SUCCESS;
907  case SCI_REQ_COMPLETED:
908  default:
909  dev_warn(&ireq->owning_controller->pdev->dev,
910  "%s: SCIC IO Request requested to abort while in wrong "
911  "state %d\n", __func__, ireq->sm.current_state_id);
912  break;
913  }
914 
915  return SCI_FAILURE_INVALID_STATE;
916 }
917 
918 enum sci_status sci_request_complete(struct isci_request *ireq)
919 {
920  enum sci_base_request_states state;
921  struct isci_host *ihost = ireq->owning_controller;
922 
923  state = ireq->sm.current_state_id;
924  if (WARN_ONCE(state != SCI_REQ_COMPLETED,
925  "isci: request completion from wrong state (%s)\n",
926  req_state_name(state)))
927  return SCI_FAILURE_INVALID_STATE;
928 
929  if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
930  sci_controller_release_frame(ihost,
931  ireq->saved_rx_frame_index);
932 
933  /* XXX can we just stop the machine and remove the 'final' state? */
934  sci_change_state(&ireq->sm, SCI_REQ_FINAL);
935  return SCI_SUCCESS;
936 }
937 
938 enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
939  u32 event_code)
940 {
941  enum sci_base_request_states state;
942  struct isci_host *ihost = ireq->owning_controller;
943 
944  state = ireq->sm.current_state_id;
945 
946  if (state != SCI_REQ_STP_PIO_DATA_IN) {
947  dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
948  __func__, event_code, req_state_name(state));
949 
950  return SCI_FAILURE_INVALID_STATE;
951  }
952 
953  switch (scu_get_event_specifier(event_code)) {
954  case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
955  /* We are waiting for data and the SCU has R_ERR the data frame.
956  * Go back to waiting for the D2H Register FIS
957  */
958  sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
959  return SCI_SUCCESS;
960  default:
961  dev_err(&ihost->pdev->dev,
962  "%s: pio request unexpected event %#x\n",
963  __func__, event_code);
964 
965  /* TODO Should we fail the PIO request when we get an
966  * unexpected event?
967  */
968  return SCI_FAILURE;
969  }
970 }
971 
972 /*
973  * This function copies response data for requests returning response data
974  * instead of sense data.
975  * @sci_req: This parameter specifies the request object for which to copy
976  * the response data.
977  */
978 static void sci_io_request_copy_response(struct isci_request *ireq)
979 {
980  void *resp_buf;
981  u32 len;
982  struct ssp_response_iu *ssp_response;
983  struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
984 
985  ssp_response = &ireq->ssp.rsp;
986 
987  resp_buf = &isci_tmf->resp.resp_iu;
988 
989  len = min_t(u32,
990  SSP_RESP_IU_MAX_SIZE,
991  be32_to_cpu(ssp_response->response_data_len));
992 
993  memcpy(resp_buf, ssp_response->resp_data, len);
994 }
995 
996 static enum sci_status
997 request_started_state_tc_event(struct isci_request *ireq,
998  u32 completion_code)
999 {
1000  struct ssp_response_iu *resp_iu;
1001  u8 datapres;
1002 
1003  /* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
1004  * to determine SDMA status
1005  */
1006  switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1007  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1008  ireq->scu_status = SCU_TASK_DONE_GOOD;
1009  ireq->sci_status = SCI_SUCCESS;
1010  break;
1011  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
1012  /* There are times when the SCU hardware will return an early
1013  * response because the io request specified more data than is
1014  * returned by the target device (mode pages, inquiry data,
1015  * etc.). We must check the response stats to see if this is
1016  * truly a failed request or a good request that just got
1017  * completed early.
1018  */
1019  struct ssp_response_iu *resp = &ireq->ssp.rsp;
1020  ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1021 
1022  sci_swab32_cpy(&ireq->ssp.rsp,
1023  &ireq->ssp.rsp,
1024  word_cnt);
1025 
1026  if (resp->status == 0) {
1027  ireq->scu_status = SCU_TASK_DONE_GOOD;
1028  ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
1029  } else {
1030  ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1031  ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1032  }
1033  break;
1034  }
1035  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
1036  ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1037 
1038  sci_swab32_cpy(&ireq->ssp.rsp,
1039  &ireq->ssp.rsp,
1040  word_cnt);
1041 
1042  ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1043  ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1044  break;
1045  }
1046 
1047  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
1048  /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
1049  * guaranteed to be received before this completion status is
1050  * posted?
1051  */
1052  resp_iu = &ireq->ssp.rsp;
1053  datapres = resp_iu->datapres;
1054 
1055  if (datapres == 1 || datapres == 2) {
1056  ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1057  ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1058  } else {
1059  ireq->scu_status = SCU_TASK_DONE_GOOD;
1060  ireq->sci_status = SCI_SUCCESS;
1061  }
1062  break;
1063  /* only stp device gets suspended. */
1075  if (ireq->protocol == SAS_PROTOCOL_STP) {
1076  ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1079  } else {
1080  ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1083  }
1084  break;
1085 
1086  /* both stp/ssp device gets suspended */
1087  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
1088  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
1089  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
1090  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
1091  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
1092  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
1093  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
1094  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
1095  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
1096  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
1097  ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1098  SCU_COMPLETION_TL_STATUS_SHIFT;
1099  ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
1100  break;
1101 
1102  /* neither ssp nor stp gets suspended. */
1103  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
1104  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
1105  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
1106  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
1107  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
1108  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
1109  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1110  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1111  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1112  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1113  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
1114  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
1115  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
1116  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
1117  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
1118  default:
1119  ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1120  SCU_COMPLETION_TL_STATUS_SHIFT;
1121  ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1122  break;
1123  }
1124 
1125  /*
1126  * TODO: This is probably wrong for ACK/NAK timeout conditions
1127  */
1128 
1129  /* In all cases we will treat this as the completion of the IO req. */
1130  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1131  return SCI_SUCCESS;
1132 }
1133 
1134 static enum sci_status
1135 request_aborting_state_tc_event(struct isci_request *ireq,
1136  u32 completion_code)
1137 {
1138  switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1139  case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
1140  case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
1141  ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
1142  ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
1143  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1144  break;
1145 
1146  default:
1147  /* Unless we get some strange error wait for the task abort to complete
1148  * TODO: Should there be a state change for this completion?
1149  */
1150  break;
1151  }
1152 
1153  return SCI_SUCCESS;
1154 }
1155 
1156 static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
1157  u32 completion_code)
1158 {
1159  switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1160  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1161  ireq->scu_status = SCU_TASK_DONE_GOOD;
1162  ireq->sci_status = SCI_SUCCESS;
1163  sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1164  break;
1165  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1166  /* Currently, the decision is to simply allow the task request
1167  * to timeout if the task IU wasn't received successfully.
1168  * There is a potential for receiving multiple task responses if
1169  * we decide to send the task IU again.
1170  */
1171  dev_warn(&ireq->owning_controller->pdev->dev,
1172  "%s: TaskRequest:0x%p CompletionCode:%x - "
1173  "ACK/NAK timeout\n", __func__, ireq,
1174  completion_code);
1175 
1176  sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1177  break;
1178  default:
1179  /*
1180  * All other completion status cause the IO to be complete.
1181  * If a NAK was received, then it is up to the user to retry
1182  * the request.
1183  */
1184  ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1185  ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1186  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1187  break;
1188  }
1189 
1190  return SCI_SUCCESS;
1191 }
1192 
1193 static enum sci_status
1194 smp_request_await_response_tc_event(struct isci_request *ireq,
1195  u32 completion_code)
1196 {
1197  switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1198  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1199  /* In the AWAIT RESPONSE state, any TC completion is
1200  * unexpected. but if the TC has success status, we
1201  * complete the IO anyway.
1202  */
1203  ireq->scu_status = SCU_TASK_DONE_GOOD;
1204  ireq->sci_status = SCI_SUCCESS;
1205  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1206  break;
1207  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1208  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1209  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1210  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1211  /* These statuses have been seen with a specific LSI
1212  * expander, which sometimes is not able to send an smp
1213  * response within 2 ms. This causes our hardware to break
1214  * the connection and set TC completion with one of
1215  * these SMP_XXX_XX_ERR statuses. For this type of error,
1216  * we ask the ihost user to retry the request.
1217  */
1218  ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
1219  ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
1220  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1221  break;
1222  default:
1223  /* All other completion status cause the IO to be complete. If a NAK
1224  * was received, then it is up to the user to retry the request
1225  */
1226  ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1227  ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1228  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1229  break;
1230  }
1231 
1232  return SCI_SUCCESS;
1233 }
1234 
1235 static enum sci_status
1236 smp_request_await_tc_event(struct isci_request *ireq,
1237  u32 completion_code)
1238 {
1239  switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1240  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1241  ireq->scu_status = SCU_TASK_DONE_GOOD;
1242  ireq->sci_status = SCI_SUCCESS;
1243  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1244  break;
1245  default:
1246  /* All other completion status cause the IO to be
1247  * complete. If a NAK was received, then it is up to
1248  * the user to retry the request.
1249  */
1250  ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1251  ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1252  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1253  break;
1254  }
1255 
1256  return SCI_SUCCESS;
1257 }
1258 
1259 static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
1260 {
1261  struct scu_sgl_element *sgl;
1262  struct scu_sgl_element_pair *sgl_pair;
1263  struct isci_request *ireq = to_ireq(stp_req);
1264  struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
1265 
1266  sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1267  if (!sgl_pair)
1268  sgl = NULL;
1269  else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
1270  if (sgl_pair->B.address_lower == 0 &&
1271  sgl_pair->B.address_upper == 0) {
1272  sgl = NULL;
1273  } else {
1274  pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
1275  sgl = &sgl_pair->B;
1276  }
1277  } else {
1278  if (sgl_pair->next_pair_lower == 0 &&
1279  sgl_pair->next_pair_upper == 0) {
1280  sgl = NULL;
1281  } else {
1282  pio_sgl->index++;
1283  pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
1284  sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1285  sgl = &sgl_pair->A;
1286  }
1287  }
1288 
1289  return sgl;
1290 }
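pio_sgl_next() walks the chained pairs built by sci_request_build_sgl(): within a pair it steps from A to B, and from B it hops to the next pair's A; a zeroed B address or a zeroed next-pair link terminates the walk. The same traversal in a pointer-based toy form:

#include <stddef.h>

struct toy_sgl_elem { unsigned long addr; };
struct toy_sgl_pair {
	struct toy_sgl_elem A, B;
	struct toy_sgl_pair *next;	/* stands in for next_pair_{upper,lower} */
};

/* advance A -> B -> next pair's A; NULL means the list is exhausted */
static struct toy_sgl_elem *toy_sgl_next(struct toy_sgl_pair **pair, int *on_b)
{
	if (!*pair)
		return NULL;
	if (!*on_b) {			/* currently on A: try B */
		*on_b = 1;
		return (*pair)->B.addr ? &(*pair)->B : NULL;
	}
	*pair = (*pair)->next;		/* currently on B: hop pairs */
	*on_b = 0;
	return *pair ? &(*pair)->A : NULL;
}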
1291 
1292 static enum sci_status
1293 stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
1294  u32 completion_code)
1295 {
1296  switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1297  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1298  ireq->scu_status = SCU_TASK_DONE_GOOD;
1299  ireq->sci_status = SCI_SUCCESS;
1300  sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
1301  break;
1302 
1303  default:
1304  /* All other completion status cause the IO to be
1305  * complete. If a NAK was received, then it is up to
1306  * the user to retry the request.
1307  */
1308  ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1309  ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1310  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1311  break;
1312  }
1313 
1314  return SCI_SUCCESS;
1315 }
1316 
1317 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
1318 
1319 /* transmit DATA_FIS from (current sgl + offset) for input
1320  * parameter length. The current sgl and offset are already stored in the IO request.
1321  */
1322 static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
1323  struct isci_request *ireq,
1324  u32 length)
1325 {
1326  struct isci_stp_request *stp_req = &ireq->stp.req;
1327  struct scu_task_context *task_context = ireq->tc;
1328  struct scu_sgl_element_pair *sgl_pair;
1329  struct scu_sgl_element *current_sgl;
1330 
1331  /* Recycle the TC and reconstruct it for sending out DATA FIS containing
1332  * for the data from current_sgl+offset for the input length
1333  */
1334  sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1335  if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
1336  current_sgl = &sgl_pair->A;
1337  else
1338  current_sgl = &sgl_pair->B;
1339 
1340  /* update the TC */
1341  task_context->command_iu_upper = current_sgl->address_upper;
1342  task_context->command_iu_lower = current_sgl->address_lower;
1343  task_context->transfer_length_bytes = length;
1344  task_context->type.stp.fis_type = FIS_DATA;
1345 
1346  /* send the new TC out. */
1347  return sci_controller_continue_io(ireq);
1348 }
1349 
1350 static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
1351 {
1352  struct isci_stp_request *stp_req = &ireq->stp.req;
1353  struct scu_sgl_element_pair *sgl_pair;
1354  enum sci_status status = SCI_SUCCESS;
1355  struct scu_sgl_element *sgl;
1356  u32 offset;
1357  u32 len = 0;
1358 
1359  offset = stp_req->sgl.offset;
1360  sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1361  if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
1362  return SCI_FAILURE;
1363 
1364  if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
1365  sgl = &sgl_pair->A;
1366  len = sgl_pair->A.length - offset;
1367  } else {
1368  sgl = &sgl_pair->B;
1369  len = sgl_pair->B.length - offset;
1370  }
1371 
1372  if (stp_req->pio_len == 0)
1373  return SCI_SUCCESS;
1374 
1375  if (stp_req->pio_len >= len) {
1376  status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
1377  if (status != SCI_SUCCESS)
1378  return status;
1379  stp_req->pio_len -= len;
1380 
1381  /* update the current sgl, offset and save for future */
1382  sgl = pio_sgl_next(stp_req);
1383  offset = 0;
1384  } else if (stp_req->pio_len < len) {
1385  sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
1386 
1387  /* Sgl offset will be adjusted and saved for future */
1388  offset += stp_req->pio_len;
1389  sgl->address_lower += stp_req->pio_len;
1390  stp_req->pio_len = 0;
1391  }
1392 
1393  stp_req->sgl.offset = offset;
1394 
1395  return status;
1396 }
1397 
1407 static enum sci_status
1408 sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
1409  u8 *data_buf, u32 len)
1410 {
1411  struct isci_request *ireq;
1412  u8 *src_addr;
1413  int copy_len;
1414  struct sas_task *task;
1415  struct scatterlist *sg;
1416  void *kaddr;
1417  int total_len = len;
1418 
1419  ireq = to_ireq(stp_req);
1420  task = isci_request_access_task(ireq);
1421  src_addr = data_buf;
1422 
1423  if (task->num_scatter > 0) {
1424  sg = task->scatter;
1425 
1426  while (total_len > 0) {
1427  struct page *page = sg_page(sg);
1428 
1429  copy_len = min_t(int, total_len, sg_dma_len(sg));
1430  kaddr = kmap_atomic(page);
1431  memcpy(kaddr + sg->offset, src_addr, copy_len);
1432  kunmap_atomic(kaddr);
1433  total_len -= copy_len;
1434  src_addr += copy_len;
1435  sg = sg_next(sg);
1436  }
1437  } else {
1438  BUG_ON(task->total_xfer_len < total_len);
1439  memcpy(task->scatter, src_addr, total_len);
1440  }
1441 
1442  return SCI_SUCCESS;
1443 }
1444 
1452 static enum sci_status sci_stp_request_pio_data_in_copy_data(
1453  struct isci_stp_request *stp_req,
1454  u8 *data_buffer)
1455 {
1456  enum sci_status status;
1457 
1458  /*
1459  * If there is less than 1K remaining in the transfer request
1460  * copy just the data for the transfer */
1461  if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
1462  status = sci_stp_request_pio_data_in_copy_data_buffer(
1463  stp_req, data_buffer, stp_req->pio_len);
1464 
1465  if (status == SCI_SUCCESS)
1466  stp_req->pio_len = 0;
1467  } else {
1468  /* We are transferring the whole frame so copy */
1469  status = sci_stp_request_pio_data_in_copy_data_buffer(
1470  stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1471 
1472  if (status == SCI_SUCCESS)
1473  stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
1474  }
1475 
1476  return status;
1477 }
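Since the SCU delivers at most 1 KiB of unsolicited frame data at a time, the helper above either drains the remaining pio_len (short final frame) or consumes exactly one full frame. A minimal sketch of that chunking rule:

#include <stdint.h>
#include <string.h>

#define TOY_FRAME_MAX 0x400 /* mirrors SCU_MAX_FRAME_BUFFER_SIZE (1 KiB) */

/* copy at most one frame's worth and decrement the outstanding count */
static void toy_consume_frame(uint8_t *dst, const uint8_t *frame,
			      uint32_t *pio_len)
{
	uint32_t n = *pio_len < TOY_FRAME_MAX ? *pio_len : TOY_FRAME_MAX;

	memcpy(dst, frame, n);
	*pio_len -= n;
}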
1478 
1479 static enum sci_status
1480 stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1481  u32 completion_code)
1482 {
1483  enum sci_status status = SCI_SUCCESS;
1484 
1485  switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1486  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1487  ireq->scu_status = SCU_TASK_DONE_GOOD;
1488  ireq->sci_status = SCI_SUCCESS;
1489  sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1490  break;
1491 
1492  default:
1493  /* All other completion status cause the IO to be
1494  * complete. If a NAK was received, then it is up to
1495  * the user to retry the request.
1496  */
1497  ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1498  ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1499  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1500  break;
1501  }
1502 
1503  return status;
1504 }
1505 
1506 static enum sci_status
1507 pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1508  u32 completion_code)
1509 {
1510  enum sci_status status = SCI_SUCCESS;
1511  bool all_frames_transferred = false;
1512  struct isci_stp_request *stp_req = &ireq->stp.req;
1513 
1514  switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1515  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1516  /* Transmit data */
1517  if (stp_req->pio_len != 0) {
1518  status = sci_stp_request_pio_data_out_transmit_data(ireq);
1519  if (status == SCI_SUCCESS) {
1520  if (stp_req->pio_len == 0)
1521  all_frames_transferred = true;
1522  }
1523  } else if (stp_req->pio_len == 0) {
1524  /*
1525  * this will happen if all of the data is written on the
1526  * first pass after the pio setup fis is received
1527  */
1528  all_frames_transferred = true;
1529  }
1530 
1531  /* all data transferred. */
1532  if (all_frames_transferred) {
1533  /*
1534  * Change the state to SCI_REQ_STP_PIO_DATA_IN
1535  * and wait for PIO_SETUP fis / or D2H REg fis. */
1536  sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1537  }
1538  break;
1539 
1540  default:
1541  /*
1542  * All other completion status cause the IO to be complete.
1543  * If a NAK was received, then it is up to the user to retry
1544  * the request.
1545  */
1546  ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1547  ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1548  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1549  break;
1550  }
1551 
1552  return status;
1553 }
1554 
1555 static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1556  u32 frame_index)
1557 {
1558  struct isci_host *ihost = ireq->owning_controller;
1559  struct dev_to_host_fis *frame_header;
1560  enum sci_status status;
1561  u32 *frame_buffer;
1562 
1563  status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1564  frame_index,
1565  (void **)&frame_header);
1566 
1567  if ((status == SCI_SUCCESS) &&
1568  (frame_header->fis_type == FIS_REGD2H)) {
1569  sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1570  frame_index,
1571  (void **)&frame_buffer);
1572 
1573  sci_controller_copy_sata_response(&ireq->stp.rsp,
1574  frame_header,
1575  frame_buffer);
1576  }
1577 
1578  sci_controller_release_frame(ihost, frame_index);
1579 
1580  return status;
1581 }
1582 
1583 static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
1584  u32 frame_index)
1585 {
1586  struct isci_host *ihost = ireq->owning_controller;
1587  enum sci_status status;
1588  struct dev_to_host_fis *frame_header;
1589  u32 *frame_buffer;
1590 
1591  status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1592  frame_index,
1593  (void **)&frame_header);
1594 
1595  if (status != SCI_SUCCESS)
1596  return status;
1597 
1598  if (frame_header->fis_type != FIS_REGD2H) {
1599  dev_err(&ireq->isci_host->pdev->dev,
1600  "%s ERROR: invalid fis type 0x%X\n",
1601  __func__, frame_header->fis_type);
1602  return SCI_FAILURE;
1603  }
1604 
1605  sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1606  frame_index,
1607  (void **)&frame_buffer);
1608 
1609  sci_controller_copy_sata_response(&ireq->stp.rsp,
1610  (u32 *)frame_header,
1611  frame_buffer);
1612 
1613  /* Frame has been decoded return it to the controller */
1614  sci_controller_release_frame(ihost, frame_index);
1615 
1616  return status;
1617 }
1618 
1619 static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
1620  u32 frame_index)
1621 {
1622  struct sas_task *task = isci_request_access_task(ireq);
1623  enum sci_status status;
1624 
1625  status = process_unsolicited_fis(ireq, frame_index);
1626 
1627  if (status == SCI_SUCCESS) {
1628  if (ireq->stp.rsp.status & ATA_ERR)
1629  status = SCI_FAILURE_IO_RESPONSE_VALID;
1630  } else {
1631  status = SCI_FAILURE_IO_RESPONSE_VALID;
1632  }
1633 
1634  if (status != SCI_SUCCESS) {
1635  ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1636  ireq->sci_status = status;
1637  } else {
1638  ireq->scu_status = SCU_TASK_DONE_GOOD;
1639  ireq->sci_status = SCI_SUCCESS;
1640  }
1641 
1642  /* the d2h ufi is the end of non-data commands */
1643  if (task->data_dir == DMA_NONE)
1644  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1645 
1646  return status;
1647 }
1648 
1649 static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
1650 {
1651  struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1652  void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
1653  struct scu_task_context *task_context = ireq->tc;
1654 
1655  /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame
1656  * type. The TC for previous Packet fis was already there, we only need to
1657  * change the H2D fis content.
1658  */
1659  memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
1660  memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
1661  memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
1662  task_context->type.stp.fis_type = FIS_DATA;
1663  task_context->transfer_length_bytes = dev->cdb_len;
1664 }
1665 
1666 static void scu_atapi_construct_task_context(struct isci_request *ireq)
1667 {
1668  struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1669  struct sas_task *task = isci_request_access_task(ireq);
1670  struct scu_task_context *task_context = ireq->tc;
1671  int cdb_len = dev->cdb_len;
1672 
1673  /* reference: SSTL 1.13.4.2
1674  * task_type, sata_direction
1675  */
1676  if (task->data_dir == DMA_TO_DEVICE) {
1677  task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
1678  task_context->sata_direction = 0;
1679  } else {
1680  /* todo: for NO_DATA command, we need to send out raw frame. */
1681  task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
1682  task_context->sata_direction = 1;
1683  }
1684 
1685  memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
1686  task_context->type.stp.fis_type = FIS_DATA;
1687 
1688  memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
1689  memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
1690  task_context->ssp_command_iu_length = cdb_len / sizeof(u32);
1691 
1692  /* task phase is set to TX_CMD */
1693  task_context->task_phase = 0x1;
1694 
1695  /* retry counter */
1696  task_context->stp_retry_count = 0;
1697 
1698  /* data transfer size. */
1699  task_context->transfer_length_bytes = task->total_xfer_len;
1700 
1701  /* setup sgl */
1702  sci_request_build_sgl(ireq);
1703 }
1704 
1705 enum sci_status
1706 sci_io_request_frame_handler(struct isci_request *ireq,
1707  u32 frame_index)
1708 {
1709  struct isci_host *ihost = ireq->owning_controller;
1710  struct isci_stp_request *stp_req = &ireq->stp.req;
1711  enum sci_base_request_states state;
1712  enum sci_status status;
1713  ssize_t word_cnt;
1714 
1715  state = ireq->sm.current_state_id;
1716  switch (state) {
1717  case SCI_REQ_STARTED: {
1718  struct ssp_frame_hdr ssp_hdr;
1719  void *frame_header;
1720 
1721  sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1722  frame_index,
1723  &frame_header);
1724 
1725  word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1726  sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1727 
1728  if (ssp_hdr.frame_type == SSP_RESPONSE) {
1729  struct ssp_response_iu *resp_iu;
1730  ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1731 
1732  sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1733  frame_index,
1734  (void **)&resp_iu);
1735 
1736  sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1737 
1738  resp_iu = &ireq->ssp.rsp;
1739 
1740  if (resp_iu->datapres == 0x01 ||
1741  resp_iu->datapres == 0x02) {
1742  ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1743  ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1744  } else {
1745  ireq->scu_status = SCU_TASK_DONE_GOOD;
1746  ireq->sci_status = SCI_SUCCESS;
1747  }
1748  } else {
1749  /* not a response frame, why did it get forwarded? */
1750  dev_err(&ihost->pdev->dev,
1751  "%s: SCIC IO Request 0x%p received unexpected "
1752  "frame %d type 0x%02x\n", __func__, ireq,
1753  frame_index, ssp_hdr.frame_type);
1754  }
1755 
1756  /*
1757  * In any case we are done with this frame buffer return it to
1758  * the controller
1759  */
1760  sci_controller_release_frame(ihost, frame_index);
1761 
1762  return SCI_SUCCESS;
1763  }
1764 
1765  case SCI_REQ_TASK_WAIT_TC_RESP:
1766  sci_io_request_copy_response(ireq);
1767  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1768  sci_controller_release_frame(ihost, frame_index);
1769  return SCI_SUCCESS;
1770 
1771  case SCI_REQ_SMP_WAIT_RESP: {
1772  struct sas_task *task = isci_request_access_task(ireq);
1773  struct scatterlist *sg = &task->smp_task.smp_resp;
1774  void *frame_header, *kaddr;
1775  u8 *rsp;
1776 
1777  sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1778  frame_index,
1779  &frame_header);
1780  kaddr = kmap_atomic(sg_page(sg));
1781  rsp = kaddr + sg->offset;
1782  sci_swab32_cpy(rsp, frame_header, 1);
1783 
1784  if (rsp[0] == SMP_RESPONSE) {
1785  void *smp_resp;
1786 
1787  sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1788  frame_index,
1789  &smp_resp);
1790 
1791  word_cnt = (sg->length/4)-1;
1792  if (word_cnt > 0)
1793  word_cnt = min_t(unsigned int, word_cnt,
1794  SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
1795  sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
1796 
1797  ireq->scu_status = SCU_TASK_DONE_GOOD;
1798  ireq->sci_status = SCI_SUCCESS;
1799  sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1800  } else {
1801  /*
1802  * This was not a response frame why did it get
1803  * forwarded?
1804  */
1805  dev_err(&ihost->pdev->dev,
1806  "%s: SCIC SMP Request 0x%p received unexpected "
1807  "frame %d type 0x%02x\n",
1808  __func__,
1809  ireq,
1810  frame_index,
1811  rsp[0]);
1812 
1813  ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
1814  ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1815  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1816  }
1817  kunmap_atomic(kaddr);
1818 
1819  sci_controller_release_frame(ihost, frame_index);
1820 
1821  return SCI_SUCCESS;
1822  }
1823 
1824  case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1825  return sci_stp_request_udma_general_frame_handler(ireq,
1826  frame_index);
1827 
1828  case SCI_REQ_STP_UDMA_WAIT_D2H:
1829  /* Use the general frame handler to copy the response data */
1830  status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
1831 
1832  if (status != SCI_SUCCESS)
1833  return status;
1834 
1835  ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1836  ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1837  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1838  return SCI_SUCCESS;
1839 
1840  case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1841  struct dev_to_host_fis *frame_header;
1842  u32 *frame_buffer;
1843 
1844  status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1845  frame_index,
1846  (void **)&frame_header);
1847 
1848  if (status != SCI_SUCCESS) {
1849  dev_err(&ihost->pdev->dev,
1850  "%s: SCIC IO Request 0x%p could not get frame "
1851  "header for frame index %d, status %x\n",
1852  __func__,
1853  stp_req,
1854  frame_index,
1855  status);
1856 
1857  return status;
1858  }
1859 
1860  switch (frame_header->fis_type) {
1861  case FIS_REGD2H:
1862  sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1863  frame_index,
1864  (void **)&frame_buffer);
1865 
1866  sci_controller_copy_sata_response(&ireq->stp.rsp,
1867  frame_header,
1868  frame_buffer);
1869 
1870  /* The command has completed with error */
1871  ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1872  ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1873  break;
1874 
1875  default:
1876  dev_warn(&ihost->pdev->dev,
1877  "%s: IO Request:0x%p Frame Id:%d protocol "
1878  "violation occurred\n", __func__, stp_req,
1879  frame_index);
1880 
1881  ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1882  ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1883  break;
1884  }
1885 
1886  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1887 
1888  /* Frame has been decoded return it to the controller */
1889  sci_controller_release_frame(ihost, frame_index);
1890 
1891  return status;
1892  }
1893 
1894  case SCI_REQ_STP_PIO_WAIT_FRAME: {
1895  struct sas_task *task = isci_request_access_task(ireq);
1896  struct dev_to_host_fis *frame_header;
1897  u32 *frame_buffer;
1898 
1899  status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1900  frame_index,
1901  (void **)&frame_header);
1902 
1903  if (status != SCI_SUCCESS) {
1904  dev_err(&ihost->pdev->dev,
1905  "%s: SCIC IO Request 0x%p could not get frame "
1906  "header for frame index %d, status %x\n",
1907  __func__, stp_req, frame_index, status);
1908  return status;
1909  }
1910 
1911  switch (frame_header->fis_type) {
1912  case FIS_PIO_SETUP:
1913  /* Get from the frame buffer the PIO Setup Data */
1914  sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1915  frame_index,
1916  (void **)&frame_buffer);
1917 
1918  /* Get the data from the PIO Setup The SCU Hardware
1919  * returns first word in the frame_header and the rest
1920  * of the data is in the frame buffer so we need to
1921  * back up one dword
1922  */
1923 
1924  /* transfer_count: first 16bits in the 4th dword */
1925  stp_req->pio_len = frame_buffer[3] & 0xffff;
1926 
1927  /* status: 4th byte in the 3rd dword */
1928  stp_req->status = (frame_buffer[2] >> 24) & 0xff;
1929 
1930  sci_controller_copy_sata_response(&ireq->stp.rsp,
1931  frame_header,
1932  frame_buffer);
1933 
1934  ireq->stp.rsp.status = stp_req->status;
1935 
1936  /* The next state is dependent on whether the
1937  * request was PIO Data-in or Data out
1938  */
1939  if (task->data_dir == DMA_FROM_DEVICE) {
1940  sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1941  } else if (task->data_dir == DMA_TO_DEVICE) {
1942  /* Transmit data */
1943  status = sci_stp_request_pio_data_out_transmit_data(ireq);
1944  if (status != SCI_SUCCESS)
1945  break;
1946  sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1947  }
1948  break;
1949 
1950  case FIS_SETDEVBITS:
1951  sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1952  break;
1953 
1954  case FIS_REGD2H:
1955  if (frame_header->status & ATA_BUSY) {
1956  /*
1957  * Now why is the drive sending a D2H Register
1958  * FIS when it is still busy? Do nothing since
1959  * we are still in the right state.
1960  */
1961  dev_dbg(&ihost->pdev->dev,
1962  "%s: SCIC PIO Request 0x%p received "
1963  "D2H Register FIS with BSY status "
1964  "0x%x\n",
1965  __func__,
1966  stp_req,
1967  frame_header->status);
1968  break;
1969  }
1970 
1971  sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1972  frame_index,
1973  (void **)&frame_buffer);
1974 
1975  sci_controller_copy_sata_response(&ireq->stp.rsp,
1976  frame_header,
1977  frame_buffer);
1978 
1979  ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1980  ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1981  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1982  break;
1983 
1984  default:
1985  /* FIXME: what do we do here? */
1986  break;
1987  }
1988 
1989  /* Frame is decoded return it to the controller */
1990  sci_controller_release_frame(ihost, frame_index);
1991 
1992  return status;
1993  }
1994 
1995  case SCI_REQ_STP_PIO_DATA_IN: {
1996  struct dev_to_host_fis *frame_header;
1997  struct sata_fis_data *frame_buffer;
1998 
1999  status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
2000  frame_index,
2001  (void **)&frame_header);
2002 
2003  if (status != SCI_SUCCESS) {
2004  dev_err(&ihost->pdev->dev,
2005  "%s: SCIC IO Request 0x%p could not get frame "
2006  "header for frame index %d, status %x\n",
2007  __func__,
2008  stp_req,
2009  frame_index,
2010  status);
2011  return status;
2012  }
2013 
2014  if (frame_header->fis_type != FIS_DATA) {
2015  dev_err(&ihost->pdev->dev,
2016  "%s: SCIC PIO Request 0x%p received frame %d "
2017  "with fis type 0x%02x when expecting a data "
2018  "fis.\n",
2019  __func__,
2020  stp_req,
2021  frame_index,
2022  frame_header->fis_type);
2023 
2024  ireq->scu_status = SCU_TASK_DONE_GOOD;
2025  ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
2026  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2027 
2028  /* Frame is decoded return it to the controller */
2029  sci_controller_release_frame(ihost, frame_index);
2030  return status;
2031  }
2032 
2033  if (stp_req->sgl.index < 0) {
2034  ireq->saved_rx_frame_index = frame_index;
2035  stp_req->pio_len = 0;
2036  } else {
2037  status = sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
2038  frame_index,
2039  (void **)&frame_buffer);
2040 
2041  status = sci_stp_request_pio_data_in_copy_data(stp_req,
2042  (u8 *)frame_buffer);
2043 
2044  /* Frame is decoded return it to the controller */
2045  sci_controller_release_frame(ihost, frame_index);
2046  }
2047 
2048  /* Check for the end of the transfer, are there more
2049  * bytes remaining for this data transfer
2050  */
2051  if (status != SCI_SUCCESS || stp_req->pio_len != 0)
2052  return status;
2053 
2054  if ((stp_req->status & ATA_BUSY) == 0) {
2055  ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2056  ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2057  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2058  } else {
2059  sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
2060  }
2061  return status;
2062  }
2063 
2064  case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
2065  struct sas_task *task = isci_request_access_task(ireq);
2066 
2067  sci_controller_release_frame(ihost, frame_index);
2068  ireq->target_device->working_request = ireq;
2069  if (task->data_dir == DMA_NONE) {
2070  sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
2071  scu_atapi_reconstruct_raw_frame_task_context(ireq);
2072  } else {
2073  sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2074  scu_atapi_construct_task_context(ireq);
2075  }
2076 
2077  sci_controller_continue_io(ireq);
2078  return SCI_SUCCESS;
2079  }
2080  case SCI_REQ_ATAPI_WAIT_D2H:
2081  return atapi_d2h_reg_frame_handler(ireq, frame_index);
2082  case SCI_REQ_ABORTING:
2083  /*
2084  * TODO: Is it even possible to get an unsolicited frame in the
2085  * aborting state?
2086  */
2087  sci_controller_release_frame(ihost, frame_index);
2088  return SCI_SUCCESS;
2089 
2090  default:
2091  dev_warn(&ihost->pdev->dev,
2092  "%s: SCIC IO Request given unexpected frame %x while "
2093  "in state %d\n",
2094  __func__,
2095  frame_index,
2096  state);
2097 
2098  sci_controller_release_frame(ihost, frame_index);
2099  return SCI_FAILURE_INVALID_STATE;
2100  }
2101 }
2102 
2103 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
2104  u32 completion_code)
2105 {
2106  enum sci_status status = SCI_SUCCESS;
2107 
2108  switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2109  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2110  ireq->scu_status = SCU_TASK_DONE_GOOD;
2111  ireq->sci_status = SCI_SUCCESS;
2112  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2113  break;
2114  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2115  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2116  /* We must check the response buffer to see if the D2H
2117  * Register FIS was received before we got the TC
2118  * completion.
2119  */
2120  if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
2121  sci_remote_device_suspend(ireq->target_device,
2122  SCI_SW_SUSPEND_NORMAL);
2123 
2124  ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2125  ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2126  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2127  } else {
2128  /* If we have an error completion status for the
2129  * TC then we can expect a D2H register FIS from
2130  * the device so we must change state to wait
2131  * for it
2132  */
2133  sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
2134  }
2135  break;
2136 
2137  /* TODO Check to see if any of these completion status need to
2138  * wait for the device to host register fis.
2139  */
2140  /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2141  * - this comes only for B0
2142  */
2143  default:
2144  /* All other completion status cause the IO to be complete. */
2145  ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2146  ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2147  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2148  break;
2149  }
2150 
2151  return status;
2152 }
2153 
2154 static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
2155  enum sci_base_request_states next)
2156 {
2157  enum sci_status status = SCI_SUCCESS;
2158 
2159  switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2160  case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2161  ireq->scu_status = SCU_TASK_DONE_GOOD;
2162  ireq->sci_status = SCI_SUCCESS;
2163  sci_change_state(&ireq->sm, next);
2164  break;
2165  default:
2166  /* All other completion status cause the IO to be complete.
2167  * If a NAK was received, then it is up to the user to retry
2168  * the request.
2169  */
2170  ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2171  ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2172 
2173  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2174  break;
2175  }
2176 
2177  return status;
2178 }
2179 
2180 static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
2181  u32 completion_code)
2182 {
2183  struct isci_remote_device *idev = ireq->target_device;
2184  struct dev_to_host_fis *d2h = &ireq->stp.rsp;
2185  enum sci_status status = SCI_SUCCESS;
2186 
2187  switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2188  case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
2189  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2190  break;
2191 
2192  case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
2193  u16 len = sci_req_tx_bytes(ireq);
2194 
2195  /* likely non-error data underrun, workaround missing
2196  * d2h frame from the controller
2197  */
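 /* The synthesized D2H register FIS below mimics a successful ATAPI
  * completion: flags (1 << 6) sets the FIS interrupt bit, status 0x50
  * is ATA_DRDY | ATA_DSC with no error, and sector_count 0x3 carries
  * the ATAPI interrupt-reason bits I/O | C/D (command complete).
  */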
2198  if (d2h->fis_type != FIS_REGD2H) {
2199  d2h->fis_type = FIS_REGD2H;
2200  d2h->flags = (1 << 6);
2201  d2h->status = 0x50;
2202  d2h->error = 0;
2203  d2h->lbal = 0;
2204  d2h->byte_count_low = len & 0xff;
2205  d2h->byte_count_high = len >> 8;
2206  d2h->device = 0xa0;
2207  d2h->lbal_exp = 0;
2208  d2h->lbam_exp = 0;
2209  d2h->lbah_exp = 0;
2210  d2h->_r_a = 0;
2211  d2h->sector_count = 0x3;
2212  d2h->sector_count_exp = 0;
2213  d2h->_r_b = 0;
2214  d2h->_r_c = 0;
2215  d2h->_r_d = 0;
2216  }
2217 
2218  ireq->scu_status = SCU_TASK_DONE_GOOD;
2219  ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
2220  status = ireq->sci_status;
2221 
2222  /* the hw will have suspended the rnc, so complete the
2223  * request upon pending resume
2224  */
2225  sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2226  break;
2227  }
2228  case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
2229  /* In this case, there is no UF coming after.
2230  * complete the IO now.
2231  */
2232  ireq->scu_status = SCU_TASK_DONE_GOOD;
2233  ireq->sci_status = SCI_SUCCESS;
2234  sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2235  break;
2236 
2237  default:
2238  if (d2h->fis_type == FIS_REGD2H) {
2239  /* UF received change the device state to ATAPI_ERROR */
2240  status = ireq->sci_status;
2241  sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2242  } else {
2243  /* If receiving any non-success TC status, no UF
2244  * received yet, then an UF for the status fis
2245  * is coming after (XXX: suspect this is
2246  * actually a protocol error or a bug like the
2247  * DONE_UNEXP_FIS case)
2248  */
2249  ireq->scu_status = SCU_TASK_DONE_GOOD;
2250  ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
2251 
2252  sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2253  }
2254  break;
2255  }
2256 
2257  return status;
2258 }
2259 
2260 static int sci_request_smp_completion_status_is_tx_suspend(
2261  unsigned int completion_status)
2262 {
2263  switch (completion_status) {
2264  case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2265  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2266  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2267  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2268  case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2269  case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2270  return 1;
2271  }
2272  return 0;
2273 }
2274 
2275 static int sci_request_smp_completion_status_is_tx_rx_suspend(
2276  unsigned int completion_status)
2277 {
2278  return 0; /* There are no Tx/Rx SMP suspend conditions. */
2279 }
2280 
2281 static int sci_request_ssp_completion_status_is_tx_suspend(
2282  unsigned int completion_status)
2283 {
2284  switch (completion_status) {
2285  case SCU_TASK_DONE_TX_RAW_CMD_ERR:
2286  case SCU_TASK_DONE_LF_ERR:
2287  case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2288  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2289  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2290  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2291  case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2292  case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2293  case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2294  case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2295  case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2296  return 1;
2297  }
2298  return 0;
2299 }
2300 
2301 static int sci_request_ssp_completion_status_is_tx_rx_suspend(
2302  unsigned int completion_status)
2303 {
2304  return 0; /* There are no Tx/Rx SSP suspend conditions. */
2305 }
2306 
2307 static int sci_request_stpsata_completion_status_is_tx_suspend(
2308  unsigned int completion_status)
2309 {
2310  switch (completion_status) {
2311  case SCU_TASK_DONE_TX_RAW_CMD_ERR:
2312  case SCU_TASK_DONE_LL_R_ERR:
2313  case SCU_TASK_DONE_LL_PERR:
2314  case SCU_TASK_DONE_REG_ERR:
2315  case SCU_TASK_DONE_SDB_ERR:
2316  case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2317  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2318  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2319  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2320  case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2321  case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2322  case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2323  case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2324  case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2325  return 1;
2326  }
2327  return 0;
2328 }
2329 
2330 
2331 static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
2332  unsigned int completion_status)
2333 {
2334  switch (completion_status) {
2335  case SCU_TASK_DONE_LF_ERR:
2336  case SCU_TASK_DONE_LL_SY_TERM:
2337  case SCU_TASK_DONE_LL_LF_TERM:
2338  case SCU_TASK_DONE_BREAK_RCVD:
2339  case SCU_TASK_DONE_INV_FIS_LEN:
2340  case SCU_TASK_DONE_UNEXP_FIS:
2341  case SCU_TASK_DONE_UNEXP_SDBFIS:
2342  case SCU_TASK_DONE_MAX_PLD_ERR:
2343  return 1;
2344  }
2345  return 0;
2346 }
2347 
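 /* The *_is_tx_suspend/*_is_tx_rx_suspend predicates above classify
  * completion codes by whether the SCU responds by suspending the
  * remote node context in the transmit direction only or in both
  * directions; the handler below posts the matching suspension event.
  */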
2348 static void sci_request_handle_suspending_completions(
2349  struct isci_request *ireq,
2350  u32 completion_code)
2351 {
2352  int is_tx = 0;
2353  int is_tx_rx = 0;
2354 
2355  switch (ireq->protocol) {
2356  case SAS_PROTOCOL_SMP:
2357  is_tx = sci_request_smp_completion_status_is_tx_suspend(
2358  completion_code);
2359  is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
2360  completion_code);
2361  break;
2362  case SAS_PROTOCOL_SSP:
2363  is_tx = sci_request_ssp_completion_status_is_tx_suspend(
2364  completion_code);
2365  is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
2366  completion_code);
2367  break;
2368  case SAS_PROTOCOL_STP:
2369  is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
2370  completion_code);
2371  is_tx_rx =
2372  sci_request_stpsata_completion_status_is_tx_rx_suspend(
2373  completion_code);
2374  break;
2375  default:
2376  dev_warn(&ireq->isci_host->pdev->dev,
2377  "%s: request %p has no valid protocol\n",
2378  __func__, ireq);
2379  break;
2380  }
2381  if (is_tx || is_tx_rx) {
2382  BUG_ON(is_tx && is_tx_rx);
2383 
2384  sci_remote_node_context_suspend(
2385  &ireq->target_device->rnc,
2386  SCI_HW_SUSPEND,
2387  (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
2388  : SCU_EVENT_TL_RNC_SUSPEND_TX);
2389  }
2390 }
2391 
2392 enum sci_status
2393 sci_io_request_tc_completion(struct isci_request *ireq,
2394  u32 completion_code)
2395 {
2396  enum sci_base_request_states state;
2397  struct isci_host *ihost = ireq->owning_controller;
2398 
2399  state = ireq->sm.current_state_id;
2400 
2401  /* Decode those completions that signal upcoming suspension events. */
2402  sci_request_handle_suspending_completions(
2403  ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
2404 
2405  switch (state) {
2406  case SCI_REQ_STARTED:
2407  return request_started_state_tc_event(ireq, completion_code);
2408 
2409  case SCI_REQ_TASK_WAIT_TC_COMP:
2410  return ssp_task_request_await_tc_event(ireq,
2411  completion_code);
2412 
2413  case SCI_REQ_SMP_WAIT_RESP:
2414  return smp_request_await_response_tc_event(ireq,
2415  completion_code);
2416 
2417  case SCI_REQ_SMP_WAIT_TC_COMP:
2418  return smp_request_await_tc_event(ireq, completion_code);
2419 
2420  case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2421  return stp_request_udma_await_tc_event(ireq,
2422  completion_code);
2423 
2424  case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2425  return stp_request_non_data_await_h2d_tc_event(ireq,
2426  completion_code);
2427 
2428  case SCI_REQ_STP_PIO_WAIT_H2D:
2429  return stp_request_pio_await_h2d_completion_tc_event(ireq,
2430  completion_code);
2431 
2432  case SCI_REQ_STP_PIO_DATA_OUT:
2433  return pio_data_out_tx_done_tc_event(ireq, completion_code);
2434 
2435  case SCI_REQ_ABORTING:
2436  return request_aborting_state_tc_event(ireq,
2437  completion_code);
2438 
2439  case SCI_REQ_ATAPI_WAIT_H2D:
2440  return atapi_raw_completion(ireq, completion_code,
2441  SCI_REQ_ATAPI_WAIT_PIO_SETUP);
2442 
2443  case SCI_REQ_ATAPI_WAIT_TC_COMP:
2444  return atapi_raw_completion(ireq, completion_code,
2445  SCI_REQ_ATAPI_WAIT_D2H);
2446 
2447  case SCI_REQ_ATAPI_WAIT_D2H:
2448  return atapi_data_tc_completion_handler(ireq, completion_code);
2449 
2450  default:
2451  dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
2452  __func__, completion_code, req_state_name(state));
2453  return SCI_FAILURE_INVALID_STATE;
2454  }
2455 }
2456 
2467 static void isci_request_process_response_iu(
2468  struct sas_task *task,
2469  struct ssp_response_iu *resp_iu,
2470  struct device *dev)
2471 {
2472  dev_dbg(dev,
2473  "%s: resp_iu = %p "
2474  "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2475  "resp_iu->response_data_len = %x, "
2476  "resp_iu->sense_data_len = %x\nresponse data: ",
2477  __func__,
2478  resp_iu,
2479  resp_iu->status,
2480  resp_iu->datapres,
2481  resp_iu->response_data_len,
2482  resp_iu->sense_data_len);
2483 
2484  task->task_status.stat = resp_iu->status;
2485 
2486  /* libsas updates the task status fields based on the response iu. */
2487  sas_ssp_task_response(dev, task, resp_iu);
2488 }
2489 
2501 static void isci_request_set_open_reject_status(
2502  struct isci_request *request,
2503  struct sas_task *task,
2504  enum service_response *response_ptr,
2505  enum exec_status *status_ptr,
2506  enum sas_open_rej_reason open_rej_reason)
2507 {
2508  /* Task in the target is done. */
2509  set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2510  *response_ptr = SAS_TASK_UNDELIVERED;
2511  *status_ptr = SAS_OPEN_REJECT;
2512  task->task_status.open_rej_reason = open_rej_reason;
2513 }
2514 
2524 static void isci_request_handle_controller_specific_errors(
2525  struct isci_remote_device *idev,
2526  struct isci_request *request,
2527  struct sas_task *task,
2528  enum service_response *response_ptr,
2529  enum exec_status *status_ptr)
2530 {
2531  unsigned int cstatus;
2532 
2533  cstatus = request->scu_status;
2534 
2535  dev_dbg(&request->isci_host->pdev->dev,
2536  "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2537  "- controller status = 0x%x\n",
2538  __func__, request, cstatus);
2539 
2540  /* Decode the controller-specific errors; most
2541  * important is to recognize those conditions in which
2542  * the target may still have a task outstanding that
2543  * must be aborted.
2544  *
2545  * Note that there are SCU completion codes being
2546  * named in the decode below for which SCIC has already
2547  * done work to handle them in a way other than as
2548  * a controller-specific completion code; these are left
2549  * in the decode below for completeness' sake.
2550  */
2551  switch (cstatus) {
2552  case SCU_TASK_DONE_DMASETUP_DIRERR:
2553  /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2554  case SCU_TASK_DONE_XFERCNT_ERR:
2555  /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2556  if (task->task_proto == SAS_PROTOCOL_SMP) {
2557  /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2558  *response_ptr = SAS_TASK_COMPLETE;
2559 
2560  /* See if the device has been/is being stopped. Note
2561  * that we ignore the quiesce state, since we are
2562  * concerned about the actual device state.
2563  */
2564  if (!idev)
2565  *status_ptr = SAS_DEVICE_UNKNOWN;
2566  else
2567  *status_ptr = SAS_ABORTED_TASK;
2568 
2569  set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2570  } else {
2571  /* Task in the target is not done. */
2572  *response_ptr = SAS_TASK_UNDELIVERED;
2573 
2574  if (!idev)
2575  *status_ptr = SAS_DEVICE_UNKNOWN;
2576  else
2577  *status_ptr = SAM_STAT_TASK_ABORTED;
2578 
2579  clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2580  }
2581 
2582  break;
2583 
2584  case SCU_TASK_DONE_CRC_ERR:
2585  case SCU_TASK_DONE_NAK_CMD_ERR:
2586  case SCU_TASK_DONE_EXCESS_DATA:
2587  case SCU_TASK_DONE_UNEXP_FIS:
2588  /* Also SCU_TASK_DONE_UNEXP_RESP: */
2589  case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
2590  case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
2591  case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
2592  /* These are conditions in which the target
2593  * has completed the task, so that no cleanup
2594  * is necessary.
2595  */
2596  *response_ptr = SAS_TASK_COMPLETE;
2597 
2598  /* See if the device has been/is being stopped. Note
2599  * that we ignore the quiesce state, since we are
2600  * concerned about the actual device state.
2601  */
2602  if (!idev)
2603  *status_ptr = SAS_DEVICE_UNKNOWN;
2604  else
2605  *status_ptr = SAS_ABORTED_TASK;
2606 
2607  set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2608  break;
2609 
2610 
2611  /* Note that the only open reject completion codes seen here will be
2612  * abandon-class codes; all others are automatically retried in the SCU.
2613  */
2614  case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2615 
2616  isci_request_set_open_reject_status(
2617  request, task, response_ptr, status_ptr,
2618  SAS_OREJ_WRONG_DEST);
2619  break;
2620 
2621  case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2622 
2623  /* Note - the return of AB0 will change when
2624  * libsas implements detection of zone violations.
2625  */
2626  isci_request_set_open_reject_status(
2627  request, task, response_ptr, status_ptr,
2628  SAS_OREJ_RESV_AB0);
2629  break;
2630 
2631  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2632 
2633  isci_request_set_open_reject_status(
2634  request, task, response_ptr, status_ptr,
2635  SAS_OREJ_RESV_AB1);
2636  break;
2637 
2638  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2639 
2640  isci_request_set_open_reject_status(
2641  request, task, response_ptr, status_ptr,
2642  SAS_OREJ_RESV_AB2);
2643  break;
2644 
2645  case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2646 
2647  isci_request_set_open_reject_status(
2648  request, task, response_ptr, status_ptr,
2649  SAS_OREJ_RESV_AB3);
2650  break;
2651 
2652  case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2653 
2654  isci_request_set_open_reject_status(
2655  request, task, response_ptr, status_ptr,
2656  SAS_OREJ_BAD_DEST);
2657  break;
2658 
2659  case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2660 
2661  isci_request_set_open_reject_status(
2662  request, task, response_ptr, status_ptr,
2663  SAS_OREJ_STP_NORES);
2664  break;
2665 
2666  case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2667 
2668  isci_request_set_open_reject_status(
2669  request, task, response_ptr, status_ptr,
2670  SAS_OREJ_EPROTO);
2671  break;
2672 
2673  case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2674 
2675  isci_request_set_open_reject_status(
2676  request, task, response_ptr, status_ptr,
2677  SAS_OREJ_CONN_RATE);
2678  break;
2679 
2680  case SCU_TASK_DONE_LL_R_ERR:
2681  /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2682  case SCU_TASK_DONE_LL_PERR:
2683  case SCU_TASK_DONE_LL_SY_TERM:
2684  /* Also SCU_TASK_DONE_NAK_ERR: */
2685  case SCU_TASK_DONE_LL_LF_TERM:
2686  /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2687  case SCU_TASK_DONE_LL_ABORT_ERR:
2688  case SCU_TASK_DONE_SEQ_INV_TYPE:
2689  /* Also SCU_TASK_DONE_UNEXP_XR: */
2690  case SCU_TASK_DONE_XR_IU_LEN_ERR:
2691  case SCU_TASK_DONE_INV_FIS_LEN:
2692  /* Also SCU_TASK_DONE_XR_WD_LEN: */
2693  case SCU_TASK_DONE_SDMA_ERR:
2694  case SCU_TASK_DONE_OFFSET_ERR:
2695  case SCU_TASK_DONE_MAX_PLD_ERR:
2696  case SCU_TASK_DONE_LF_ERR:
2697  case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
2698  case SCU_TASK_DONE_SMP_LL_RX_ERR:
2699  case SCU_TASK_DONE_UNEXP_DATA:
2700  case SCU_TASK_DONE_UNEXP_SDBFIS:
2701  case SCU_TASK_DONE_REG_ERR:
2702  case SCU_TASK_DONE_SDB_ERR:
2703  case SCU_TASK_DONE_TASK_ABORT:
2704  default:
2705  /* Task in the target is not done. */
2706  *response_ptr = SAS_TASK_UNDELIVERED;
2707  *status_ptr = SAM_STAT_TASK_ABORTED;
2708 
2709  if (task->task_proto == SAS_PROTOCOL_SMP)
2710  set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2711  else
2712  clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2713  break;
2714  }
2715 }
2716 
2717 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
2718 {
2719  struct task_status_struct *ts = &task->task_status;
2720  struct ata_task_resp *resp = (void *)&ts->buf[0];
2721 
2722  resp->frame_len = sizeof(*fis);
2723  memcpy(resp->ending_fis, fis, sizeof(*fis));
2724  ts->buf_valid_size = sizeof(*resp);
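 /* libsas reads task_status.buf back as a struct ata_task_resp; the
  * ending FIS copied here is used to reconstruct the ATA taskfile
  * result for libata.
  */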
2725 
2726  /* If the device fault bit is set in the status register, then
2727  * report a protocol response so libsas will decode the ending FIS.
2728  */
2729  if (fis->status & ATA_DF)
2730  ts->stat = SAS_PROTO_RESPONSE;
2731  else if (fis->status & ATA_ERR)
2732  ts->stat = SAM_STAT_CHECK_CONDITION;
2733  else
2734  ts->stat = SAM_STAT_GOOD;
2735 
2736  ts->resp = SAS_TASK_COMPLETE;
2737 }
2738 
2739 static void isci_request_io_request_complete(struct isci_host *ihost,
2740  struct isci_request *request,
2741  enum sci_io_status completion_status)
2742 {
2743  struct sas_task *task = isci_request_access_task(request);
2744  struct ssp_response_iu *resp_iu;
2745  unsigned long task_flags;
2746  struct isci_remote_device *idev = request->target_device;
2747  enum service_response response = SAS_TASK_UNDELIVERED;
2748  enum exec_status status = SAS_ABORTED_TASK;
2749 
2750  dev_dbg(&ihost->pdev->dev,
2751  "%s: request = %p, task = %p, "
2752  "task->data_dir = %d completion_status = 0x%x\n",
2753  __func__, request, task, task->data_dir, completion_status);
2754 
2755  /* The request is done from an SCU HW perspective. */
2756 
2757  /* This is an active request being completed from the core. */
2758  switch (completion_status) {
2759 
2760  case SCI_IO_FAILURE_RESPONSE_VALID:
2761  dev_dbg(&ihost->pdev->dev,
2762  "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2763  __func__, request, task);
2764 
2765  if (sas_protocol_ata(task->task_proto)) {
2766  isci_process_stp_response(task, &request->stp.rsp);
2767  } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2768 
2769  /* crack the iu response buffer. */
2770  resp_iu = &request->ssp.rsp;
2771  isci_request_process_response_iu(task, resp_iu,
2772  &ihost->pdev->dev);
2773 
2774  } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2775 
2776  dev_err(&ihost->pdev->dev,
2777  "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2778  "SAS_PROTOCOL_SMP protocol\n",
2779  __func__);
2780 
2781  } else
2782  dev_err(&ihost->pdev->dev,
2783  "%s: unknown protocol\n", __func__);
2784 
2785  /* use the task status set in the task struct by the
2786  * isci_request_process_response_iu call.
2787  */
2788  set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2789  response = task->task_status.resp;
2790  status = task->task_status.stat;
2791  break;
2792 
2793  case SCI_IO_SUCCESS:
2794  case SCI_IO_SUCCESS_IO_DONE_EARLY:
2795 
2796  response = SAS_TASK_COMPLETE;
2797  status = SAM_STAT_GOOD;
2798  set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2799 
2800  if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2801 
2802  /* This was an SSP / STP / SATA transfer.
2803  * There is a possibility that less data than
2804  * the maximum was transferred.
2805  */
2806  u32 transferred_length = sci_req_tx_bytes(request);
2807 
2808  task->task_status.residual
2809  = task->total_xfer_len - transferred_length;
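 /* For example, an 8192-byte request that moved only 7680 bytes
  * leaves residual = 512 and is reported as a data underrun below.
  */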
2810 
2811  /* If there were residual bytes, call this an
2812  * underrun.
2813  */
2814  if (task->task_status.residual != 0)
2815  status = SAS_DATA_UNDERRUN;
2816 
2817  dev_dbg(&ihost->pdev->dev,
2818  "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2819  __func__, status);
2820 
2821  } else
2822  dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
2823  __func__);
2824  break;
2825 
2826  case SCI_IO_FAILURE_TERMINATED:
2827 
2828  dev_dbg(&ihost->pdev->dev,
2829  "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2830  __func__, request, task);
2831 
2832  /* The request was terminated explicitly. */
2833  set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2834  response = SAS_TASK_UNDELIVERED;
2835 
2836  /* See if the device has been/is being stopped. Note
2837  * that we ignore the quiesce state, since we are
2838  * concerned about the actual device state.
2839  */
2840  if (!idev)
2841  status = SAS_DEVICE_UNKNOWN;
2842  else
2843  status = SAS_ABORTED_TASK;
2844  break;
2845 
2846  case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2847 
2848  isci_request_handle_controller_specific_errors(idev, request,
2849  task, &response,
2850  &status);
2851  break;
2852 
2853  case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2854  /* This is a special case, in that the I/O completion
2855  * is telling us that the device needs a reset.
2856  * In order for the device reset condition to be
2857  * noticed, the I/O has to be handled in the error
2858  * handler. Set the reset flag and cause the
2859  * SCSI error thread to be scheduled.
2860  */
2861  spin_lock_irqsave(&task->task_state_lock, task_flags);
2862  task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2863  spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2864 
2865  /* Fail the I/O. */
2866  response = SAS_TASK_UNDELIVERED;
2867  status = SAM_STAT_TASK_ABORTED;
2868 
2869  clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2870  break;
2871 
2872  case SCI_FAILURE_RETRY_REQUIRED:
2873 
2874  /* Fail the I/O so it can be retried. */
2875  response = SAS_TASK_UNDELIVERED;
2876  if (!idev)
2877  status = SAS_DEVICE_UNKNOWN;
2878  else
2879  status = SAS_ABORTED_TASK;
2880 
2881  set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2882  break;
2883 
2884 
2885  default:
2886  /* Catch any otherwise unhandled error codes here. */
2887  dev_dbg(&ihost->pdev->dev,
2888  "%s: invalid completion code: 0x%x - "
2889  "isci_request = %p\n",
2890  __func__, completion_status, request);
2891 
2892  response = SAS_TASK_UNDELIVERED;
2893 
2894  /* See if the device has been/is being stopped. Note
2895  * that we ignore the quiesce state, since we are
2896  * concerned about the actual device state.
2897  */
2898  if (!idev)
2899  status = SAS_DEVICE_UNKNOWN;
2900  else
2901  status = SAS_ABORTED_TASK;
2902 
2903  if (SAS_PROTOCOL_SMP == task->task_proto)
2904  set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2905  else
2906  clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2907  break;
2908  }
2909 
2910  switch (task->task_proto) {
2911  case SAS_PROTOCOL_SSP:
2912  if (task->data_dir == DMA_NONE)
2913  break;
2914  if (task->num_scatter == 0)
2915  /* 0 indicates a single dma address */
2916  dma_unmap_single(&ihost->pdev->dev,
2917  request->zero_scatter_daddr,
2918  task->total_xfer_len, task->data_dir);
2919  else /* unmap the sgl dma addresses */
2920  dma_unmap_sg(&ihost->pdev->dev, task->scatter,
2921  request->num_sg_entries, task->data_dir);
2922  break;
2923  case SAS_PROTOCOL_SMP: {
2924  struct scatterlist *sg = &task->smp_task.smp_req;
2925  struct smp_req *smp_req;
2926  void *kaddr;
2927 
2928  dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
2929 
2930  /* need to swab it back in case the command buffer is re-used */
2931  kaddr = kmap_atomic(sg_page(sg));
2932  smp_req = kaddr + sg->offset;
2933  sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2934  kunmap_atomic(kaddr);
2935  break;
2936  }
2937  default:
2938  break;
2939  }
2940 
2941  spin_lock_irqsave(&task->task_state_lock, task_flags);
2942 
2943  task->task_status.resp = response;
2944  task->task_status.stat = status;
2945 
2946  if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
2947  /* Normal notification (task_done) */
2948  task->task_state_flags |= SAS_TASK_STATE_DONE;
2949  task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
2950  SAS_TASK_STATE_PENDING);
2951  }
2952  spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2953 
2954  /* complete the io request to the core. */
2955  sci_controller_complete_io(ihost, request->target_device, request);
2956 
2957  /* set terminated handle so it cannot be completed or
2958  * terminated again, and to cause any calls into abort
2959  * task to recognize the already completed case.
2960  */
2961  set_bit(IREQ_TERMINATED, &request->flags);
2962 
2963  ireq_done(ihost, request, task);
2964 }
2965 
2966 static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2967 {
2968  struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2969  struct domain_device *dev = ireq->target_device->domain_dev;
2970  enum sci_base_request_states state;
2971  struct sas_task *task;
2972 
2973  /* XXX as hch said always creating an internal sas_task for tmf
2974  * requests would simplify the driver
2975  */
2976  task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
2977 
2978  /* all unaccelerated request types (non ssp or ncq) handled with
2979  * substates
2980  */
2981  if (!task && dev->dev_type == SAS_END_DEV) {
2982  state = SCI_REQ_TASK_WAIT_TC_COMP;
2983  } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2984  state = SCI_REQ_SMP_WAIT_RESP;
2985  } else if (task && sas_protocol_ata(task->task_proto) &&
2986  !task->ata_task.use_ncq) {
2987  if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
2988  task->ata_task.fis.command == ATA_CMD_PACKET) {
2989  state = SCI_REQ_ATAPI_WAIT_H2D;
2990  } else if (task->data_dir == DMA_NONE) {
2991  state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
2992  } else if (task->ata_task.dma_xfer) {
2993  state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
2994  } else /* PIO */ {
2995  state = SCI_REQ_STP_PIO_WAIT_H2D;
2996  }
2997  } else {
2998  /* SSP or NCQ are fully accelerated, no substates */
2999  return;
3000  }
3001  sci_change_state(sm, state);
3002 }
3003 
3004 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
3005 {
3006  struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3007  struct isci_host *ihost = ireq->owning_controller;
3008 
3009  /* Tell the SCI_USER that the IO request is complete */
3010  if (!test_bit(IREQ_TMF, &ireq->flags))
3011  isci_request_io_request_complete(ihost, ireq,
3012  ireq->sci_status);
3013  else
3014  isci_task_request_complete(ihost, ireq, ireq->sci_status);
3015 }
3016 
3017 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
3018 {
3019  struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3020 
3021  /* Setting the abort bit in the Task Context is required by the silicon. */
3022  ireq->tc->abort = 1;
3023 }
3024 
3025 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3026 {
3027  struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3028 
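 /* Only one non-accelerated (PIO or non-data) STP request is active
  * per device at a time; recording it here lets the device's frame
  * handlers associate incoming unsolicited frames with this request.
  */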
3029  ireq->target_device->working_request = ireq;
3030 }
3031 
3032 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3033 {
3034  struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3035 
3036  ireq->target_device->working_request = ireq;
3037 }
3038 
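 /* Entry actions for the request state machine; states with an empty
  * initializer need no work on entry.
  */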
3039 static const struct sci_base_state sci_request_state_table[] = {
3040  [SCI_REQ_INIT] = { },
3041  [SCI_REQ_CONSTRUCTED] = { },
3042  [SCI_REQ_STARTED] = {
3043  .enter_state = sci_request_started_state_enter,
3044  },
3045  [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
3046  .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
3047  },
3048  [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
3049  [SCI_REQ_STP_PIO_WAIT_H2D] = {
3050  .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
3051  },
3052  [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
3053  [SCI_REQ_STP_PIO_DATA_IN] = { },
3054  [SCI_REQ_STP_PIO_DATA_OUT] = { },
3055  [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
3056  [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
3057  [SCI_REQ_TASK_WAIT_TC_COMP] = { },
3058  [SCI_REQ_TASK_WAIT_TC_RESP] = { },
3059  [SCI_REQ_SMP_WAIT_RESP] = { },
3060  [SCI_REQ_SMP_WAIT_TC_COMP] = { },
3061  [SCI_REQ_ATAPI_WAIT_H2D] = { },
3062  [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
3063  [SCI_REQ_ATAPI_WAIT_D2H] = { },
3064  [SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
3065  [SCI_REQ_COMPLETED] = {
3066  .enter_state = sci_request_completed_state_enter,
3067  },
3068  [SCI_REQ_ABORTING] = {
3069  .enter_state = sci_request_aborting_state_enter,
3070  },
3071  [SCI_REQ_FINAL] = { },
3072 };
3073 
3074 static void
3075 sci_general_request_construct(struct isci_host *ihost,
3076  struct isci_remote_device *idev,
3077  struct isci_request *ireq)
3078 {
3079  sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
3080 
3081  ireq->target_device = idev;
3082  ireq->protocol = SAS_PROTOCOL_NONE;
3083  ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3084 
3085  ireq->sci_status = SCI_SUCCESS;
3086  ireq->scu_status = 0;
3087  ireq->post_context = 0xFFFFFFFF;
3088 }
3089 
3090 static enum sci_status
3091 sci_io_request_construct(struct isci_host *ihost,
3092  struct isci_remote_device *idev,
3093  struct isci_request *ireq)
3094 {
3095  struct domain_device *dev = idev->domain_dev;
3096  enum sci_status status = SCI_SUCCESS;
3097 
3098  /* Build the common part of the request */
3099  sci_general_request_construct(ihost, idev, ireq);
3100 
3101  if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3102  return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3103 
3104  if (dev->dev_type == SAS_END_DEV)
3105  /* pass */;
3106  else if (dev_is_sata(dev))
3107  memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
3108  else if (dev_is_expander(dev))
3109  /* pass */;
3110  else
3111  return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3112 
3113  memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
3114 
3115  return status;
3116 }
3117 
3118 enum sci_status sci_task_request_construct(struct isci_host *ihost,
3119  struct isci_remote_device *idev,
3120  u16 io_tag, struct isci_request *ireq)
3121 {
3122  struct domain_device *dev = idev->domain_dev;
3123  enum sci_status status = SCI_SUCCESS;
3124 
3125  /* Build the common part of the request */
3126  sci_general_request_construct(ihost, idev, ireq);
3127 
3128  if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
3129  set_bit(IREQ_TMF, &ireq->flags);
3130  memset(ireq->tc, 0, sizeof(struct scu_task_context));
3131 
3132  /* Set the protocol indicator. */
3133  if (dev_is_sata(dev))
3134  ireq->protocol = SAS_PROTOCOL_STP;
3135  else
3136  ireq->protocol = SAS_PROTOCOL_SSP;
3137  } else
3138  status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3139 
3140  return status;
3141 }
3142 
3143 static enum sci_status isci_request_ssp_request_construct(
3144  struct isci_request *request)
3145 {
3146  enum sci_status status;
3147 
3148  dev_dbg(&request->isci_host->pdev->dev,
3149  "%s: request = %p\n",
3150  __func__,
3151  request);
3152  status = sci_io_request_construct_basic_ssp(request);
3153  return status;
3154 }
3155 
3156 static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
3157 {
3158  struct sas_task *task = isci_request_access_task(ireq);
3159  struct host_to_dev_fis *fis = &ireq->stp.cmd;
3160  struct ata_queued_cmd *qc = task->uldd_task;
3161  enum sci_status status;
3162 
3163  dev_dbg(&ireq->isci_host->pdev->dev,
3164  "%s: ireq = %p\n",
3165  __func__,
3166  ireq);
3167 
3168  memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
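 /* Bit 7 of the FIS flags byte is the C bit (set for a command-register
  * update); the low nibble is the port-multiplier port field, which is
  * cleared below.
  */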
3169  if (!task->ata_task.device_control_reg_update)
3170  fis->flags |= 0x80;
3171  fis->flags &= 0xF0;
3172 
3173  status = sci_io_request_construct_basic_sata(ireq);
3174 
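 /* For NCQ (FPDMA) commands the 5-bit queue tag is carried in bits 7:3
  * of the FIS sector-count field, hence the shift by 3; the tag is also
  * recorded in the task context for the SCU.
  */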
3175  if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
3176  qc->tf.command == ATA_CMD_FPDMA_READ)) {
3177  fis->sector_count = qc->tag << 3;
3178  ireq->tc->type.stp.ncq_tag = qc->tag;
3179  }
3180 
3181  return status;
3182 }
3183 
3184 static enum sci_status
3185 sci_io_request_construct_smp(struct device *dev,
3186  struct isci_request *ireq,
3187  struct sas_task *task)
3188 {
3189  struct scatterlist *sg = &task->smp_task.smp_req;
3190  struct isci_remote_device *idev;
3191  struct scu_task_context *task_context;
3192  struct isci_port *iport;
3193  struct smp_req *smp_req;
3194  void *kaddr;
3195  u8 req_len;
3196  u32 cmd;
3197 
3198  kaddr = kmap_atomic(sg_page(sg));
3199  smp_req = kaddr + sg->offset;
3200  /*
3201  * Look at the SMP request's header fields; for certain SAS 1.x SMP
3202  * functions under SAS 2.0, a zero request length really indicates
3203  * a non-zero default length.
3204  */
3205  if (smp_req->req_len == 0) {
3206  switch (smp_req->func) {
3207  case SMP_DISCOVER:
3208  case SMP_REPORT_PHY_ERR_LOG:
3209  case SMP_REPORT_PHY_SATA:
3210  case SMP_REPORT_ROUTE_INFO:
3211  smp_req->req_len = 2;
3212  break;
3213  case SMP_CONF_ROUTE_INFO:
3214  case SMP_PHY_CONTROL:
3215  case SMP_PHY_TEST_FUNCTION:
3216  smp_req->req_len = 9;
3217  break;
3218  /* Default - zero is a valid default for 2.0. */
3219  }
3220  }
3221  req_len = smp_req->req_len;
3222  sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3223  cmd = *(u32 *) smp_req;
3224  kunmap_atomic(kaddr);
3225 
3226  if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3227  return SCI_FAILURE;
3228 
3229  ireq->protocol = SAS_PROTOCOL_SMP;
3230 
3231  /* byte swap the smp request. */
3232 
3233  task_context = ireq->tc;
3234 
3235  idev = ireq->target_device;
3236  iport = idev->owning_port;
3237 
3238  /*
3239  * Fill in the TC with its required data
3240  * 00h
3241  */
3242  task_context->priority = 0;
3243  task_context->initiator_request = 1;
3244  task_context->connection_rate = idev->connection_rate;
3245  task_context->protocol_engine_index = ISCI_PEG;
3246  task_context->logical_port_index = iport->physical_port_index;
3247  task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3248  task_context->abort = 0;
3249  task_context->valid = SCU_TASK_CONTEXT_VALID;
3250  task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3251 
3252  /* 04h */
3253  task_context->remote_node_index = idev->rnc.remote_node_index;
3254  task_context->command_code = 0;
3255  task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3256 
3257  /* 08h */
3258  task_context->link_layer_control = 0;
3259  task_context->do_not_dma_ssp_good_response = 1;
3260  task_context->strict_ordering = 0;
3261  task_context->control_frame = 1;
3262  task_context->timeout_enable = 0;
3263  task_context->block_guard_enable = 0;
3264 
3265  /* 0ch */
3266  task_context->address_modifier = 0;
3267 
3268  /* 10h */
3269  task_context->ssp_command_iu_length = req_len;
3270 
3271  /* 14h */
3272  task_context->transfer_length_bytes = 0;
3273 
3274  /*
3275  * 18h ~ 30h, protocol specific
3276  * since the command IU has been built by the framework at this point,
3277  * we just copy the first DWord from the command IU to this location. */
3278  memcpy(&task_context->type.smp, &cmd, sizeof(u32));
3279 
3280  /*
3281  * 40h
3282  * "For SMP you could program it to zero. We would prefer that way
3283  * so that done code will be consistent." - Venki
3284  */
3285  task_context->task_phase = 0;
3286 
3287  ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3288  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3289  (iport->physical_port_index <<
3290  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3291  ISCI_TAG_TCI(ireq->io_tag));
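 /* The post context packs everything needed to post this task to the
  * SCU: the request type, protocol engine group, logical port, and the
  * task context index derived from the I/O tag.
  */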
3292  /*
3293  * Copy the physical address for the command buffer to the SCU Task
3294  * Context command buffer should not contain command header.
3295  */
3296  task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
3297  task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
3298 
3299  /* SMP response comes as UF, so no need to set response IU address. */
3300  task_context->response_iu_upper = 0;
3301  task_context->response_iu_lower = 0;
3302 
3303  sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
3304 
3305  return SCI_SUCCESS;
3306 }
3307 
3308 /*
3309  * isci_smp_request_build() - This function builds the smp request.
3310  * @ireq: This parameter points to the isci_request allocated in the
3311  * request construct function.
3312  *
3313  * SCI_SUCCESS on successful completion, or specific failure code.
3314  */
3315 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3316 {
3317  struct sas_task *task = isci_request_access_task(ireq);
3318  struct device *dev = &ireq->isci_host->pdev->dev;
3319  enum sci_status status = SCI_FAILURE;
3320 
3321  status = sci_io_request_construct_smp(dev, ireq, task);
3322  if (status != SCI_SUCCESS)
3323  dev_dbg(&ireq->isci_host->pdev->dev,
3324  "%s: failed with status = %d\n",
3325  __func__,
3326  status);
3327 
3328  return status;
3329 }
3330 
3341 static enum sci_status isci_io_request_build(struct isci_host *ihost,
3342  struct isci_request *request,
3343  struct isci_remote_device *idev)
3344 {
3345  enum sci_status status = SCI_SUCCESS;
3346  struct sas_task *task = isci_request_access_task(request);
3347 
3348  dev_dbg(&ihost->pdev->dev,
3349  "%s: idev = 0x%p; request = %p, "
3350  "num_scatter = %d\n",
3351  __func__,
3352  idev,
3353  request,
3354  task->num_scatter);
3355 
3356  /* map the sgl addresses, if present.
3357  * libata does the mapping for sata devices
3358  * before we get the request.
3359  */
3360  if (task->num_scatter &&
3361  !sas_protocol_ata(task->task_proto) &&
3362  !(SAS_PROTOCOL_SMP & task->task_proto)) {
3363 
3364  request->num_sg_entries = dma_map_sg(
3365  &ihost->pdev->dev,
3366  task->scatter,
3367  task->num_scatter,
3368  task->data_dir
3369  );
3370 
3371  if (request->num_sg_entries == 0)
3372  return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3373  }
3374 
3375  status = sci_io_request_construct(ihost, idev, request);
3376 
3377  if (status != SCI_SUCCESS) {
3378  dev_dbg(&ihost->pdev->dev,
3379  "%s: failed request construct\n",
3380  __func__);
3381  return SCI_FAILURE;
3382  }
3383 
3384  switch (task->task_proto) {
3385  case SAS_PROTOCOL_SMP:
3386  status = isci_smp_request_build(request);
3387  break;
3388  case SAS_PROTOCOL_SSP:
3389  status = isci_request_ssp_request_construct(request);
3390  break;
3391  case SAS_PROTOCOL_SATA:
3392  case SAS_PROTOCOL_STP:
3393  case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3394  status = isci_request_stp_request_construct(request);
3395  break;
3396  default:
3397  dev_dbg(&ihost->pdev->dev,
3398  "%s: unknown protocol\n", __func__);
3399  return SCI_FAILURE;
3400  }
3401 
3402  return SCI_SUCCESS;
3403 }
3404 
3405 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
3406 {
3407  struct isci_request *ireq;
3408 
3409  ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
3410  ireq->io_tag = tag;
3411  ireq->io_request_completion = NULL;
3412  ireq->flags = 0;
3413  ireq->num_sg_entries = 0;
3414 
3415  return ireq;
3416 }
3417 
3418 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
3419  struct sas_task *task,
3420  u16 tag)
3421 {
3422  struct isci_request *ireq;
3423 
3424  ireq = isci_request_from_tag(ihost, tag);
3425  ireq->ttype_ptr.io_task_ptr = task;
3426  clear_bit(IREQ_TMF, &ireq->flags);
3427  task->lldd_task = ireq;
3428 
3429  return ireq;
3430 }
3431 
3432 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
3433  struct isci_tmf *isci_tmf,
3434  u16 tag)
3435 {
3436  struct isci_request *ireq;
3437 
3438  ireq = isci_request_from_tag(ihost, tag);
3439  ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3440  set_bit(IREQ_TMF, &ireq->flags);
3441 
3442  return ireq;
3443 }
3444 
3445 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
3446  struct sas_task *task, u16 tag)
3447 {
3448  enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3449  struct isci_request *ireq;
3450  unsigned long flags;
3451  int ret = 0;
3452 
3453  /* do common allocation and init of request object. */
3454  ireq = isci_io_request_from_tag(ihost, task, tag);
3455 
3456  status = isci_io_request_build(ihost, ireq, idev);
3457  if (status != SCI_SUCCESS) {
3458  dev_dbg(&ihost->pdev->dev,
3459  "%s: request_construct failed - status = 0x%x\n",
3460  __func__,
3461  status);
3462  return status;
3463  }
3464 
3465  spin_lock_irqsave(&ihost->scic_lock, flags);
3466 
3467  if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
3468 
3469  if (isci_task_is_ncq_recovery(task)) {
3470 
3471  /* The device is in an NCQ recovery state. Issue the
3472  * request on the task side. Note that it will
3473  * complete on the I/O request side because the
3474  * request was built that way (ie.
3475  * ireq->is_task_management_request is false).
3476  */
3477  status = sci_controller_start_task(ihost,
3478  idev,
3479  ireq);
3480  } else {
3481  status = SCI_FAILURE;
3482  }
3483  } else {
3484  /* send the request, let the core assign the IO TAG. */
3485  status = sci_controller_start_io(ihost, idev,
3486  ireq);
3487  }
3488 
3489  if (status != SCI_SUCCESS &&
3490  status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3491  dev_dbg(&ihost->pdev->dev,
3492  "%s: failed request start (0x%x)\n",
3493  __func__, status);
3494  spin_unlock_irqrestore(&ihost->scic_lock, flags);
3495  return status;
3496  }
3497  /* Either I/O started OK, or the core has signaled that
3498  * the device needs a target reset.
3499  */
3500  if (status != SCI_SUCCESS) {
3501  /* The request did not really start in the
3502  * hardware, so clear the request handle
3503  * here so no terminations will be done.
3504  */
3505  set_bit(IREQ_TERMINATED, &ireq->flags);
3506  }
3507  spin_unlock_irqrestore(&ihost->scic_lock, flags);
3508 
3509  if (status ==
3510  SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3511  /* Signal libsas that we need the SCSI error
3512  * handler thread to work on this I/O and that
3513  * we want a device reset.
3514  */
3515  spin_lock_irqsave(&task->task_state_lock, flags);
3516  task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3517  spin_unlock_irqrestore(&task->task_state_lock, flags);
3518 
3519  /* Cause this task to be scheduled in the SCSI error
3520  * handler thread.
3521  */
3522  sas_task_abort(task);
3523 
3524  /* Change the status, since we are holding
3525  * the I/O until it is managed by the SCSI
3526  * error handler.
3527  */
3528  status = SCI_SUCCESS;
3529  }
3530 
3531  return ret;
3532 }