Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
remote_device.c
Go to the documentation of this file.
1 /*
2  * This file is provided under a dual BSD/GPLv2 license. When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  * * Redistributions of source code must retain the above copyright
34  * notice, this list of conditions and the following disclaimer.
35  * * Redistributions in binary form must reproduce the above copyright
36  * notice, this list of conditions and the following disclaimer in
37  * the documentation and/or other materials provided with the
38  * distribution.
39  * * Neither the name of Intel Corporation nor the names of its
40  * contributors may be used to endorse or promote products derived
41  * from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55 #include <scsi/sas.h>
56 #include <linux/bitops.h>
57 #include "isci.h"
58 #include "port.h"
59 #include "remote_device.h"
60 #include "request.h"
61 #include "remote_node_context.h"
62 #include "scu_event_codes.h"
63 #include "task.h"
64 
65 #undef C
66 #define C(a) (#a)
/* Map an sci_remote_device_states enumerator to its printable name.
 * REMOTE_DEV_STATES expands each state through the C() stringification
 * macro defined just above, producing one string per enumerator.
 * NOTE(review): assumes @state is a valid enumerator; no bounds check
 * is performed on the lookup.
 */
const char *dev_state_name(enum sci_remote_device_states state)
{
	static const char * const strings[] = REMOTE_DEV_STATES;

	return strings[state];
}
73 #undef C
74 
77 {
78  return sci_remote_node_context_suspend(&idev->rnc, reason,
80 }
81 
90 static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
91 {
92  dev_dbg(&ihost->pdev->dev,
93  "%s: idev = %p\n", __func__, idev);
94 
96  set_bit(IDEV_IO_READY, &idev->flags);
98  wake_up(&ihost->eventq);
99 }
100 
101 static enum sci_status sci_remote_device_terminate_req(
102  struct isci_host *ihost,
103  struct isci_remote_device *idev,
104  int check_abort,
105  struct isci_request *ireq)
106 {
107  if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
108  (ireq->target_device != idev) ||
109  (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
110  return SCI_SUCCESS;
111 
112  dev_dbg(&ihost->pdev->dev,
113  "%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
114  __func__, idev, idev->flags, ireq, ireq->target_device);
115 
117 
118  return sci_controller_terminate_request(ihost, idev, ireq);
119 }
120 
121 static enum sci_status sci_remote_device_terminate_reqs_checkabort(
122  struct isci_remote_device *idev,
123  int chk)
124 {
125  struct isci_host *ihost = idev->owning_port->owning_controller;
127  u32 i;
128 
129  for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
130  struct isci_request *ireq = ihost->reqs[i];
131  enum sci_status s;
132 
133  s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
134  if (s != SCI_SUCCESS)
135  status = s;
136  }
137  return status;
138 }
139 
140 static bool isci_compare_suspendcount(
141  struct isci_remote_device *idev,
142  u32 localcount)
143 {
144  smp_rmb();
145 
146  /* Check for a change in the suspend count, or the RNC
147  * being destroyed.
148  */
149  return (localcount != idev->rnc.suspend_count)
150  || sci_remote_node_context_is_being_destroyed(&idev->rnc);
151 }
152 
153 static bool isci_check_reqterm(
154  struct isci_host *ihost,
155  struct isci_remote_device *idev,
156  struct isci_request *ireq,
157  u32 localcount)
158 {
159  unsigned long flags;
160  bool res;
161 
162  spin_lock_irqsave(&ihost->scic_lock, flags);
163  res = isci_compare_suspendcount(idev, localcount)
165  spin_unlock_irqrestore(&ihost->scic_lock, flags);
166 
167  return res;
168 }
169 
/* Under the host's scic_lock, test whether the device's RNC suspend
 * state has changed (or the RNC is being destroyed) AND the device has
 * no started requests remaining.  Used as the wait_event_timeout()
 * condition when waiting for all of a device's requests to terminate.
 */
static bool isci_check_devempty(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	u32 localcount)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	res = isci_compare_suspendcount(idev, localcount)
		&& idev->started_request_count == 0;
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return res;
}
185 
187  struct isci_host *ihost,
188  struct isci_remote_device *idev,
189  struct isci_request *ireq)
190 {
191  enum sci_status status = SCI_SUCCESS;
192  unsigned long flags;
193  u32 rnc_suspend_count;
194 
195  spin_lock_irqsave(&ihost->scic_lock, flags);
196 
197  if (isci_get_device(idev) == NULL) {
198  dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
199  __func__, idev);
200  spin_unlock_irqrestore(&ihost->scic_lock, flags);
201  status = SCI_FAILURE;
202  } else {
203  /* If already suspended, don't wait for another suspension. */
204  smp_rmb();
205  rnc_suspend_count
207  ? 0 : idev->rnc.suspend_count;
208 
209  dev_dbg(&ihost->pdev->dev,
210  "%s: idev=%p, ireq=%p; started_request_count=%d, "
211  "rnc_suspend_count=%d, rnc.suspend_count=%d"
212  "about to wait\n",
213  __func__, idev, ireq, idev->started_request_count,
214  rnc_suspend_count, idev->rnc.suspend_count);
215 
216  #define MAX_SUSPEND_MSECS 10000
217  if (ireq) {
218  /* Terminate a specific TC. */
220  sci_remote_device_terminate_req(ihost, idev, 0, ireq);
221  spin_unlock_irqrestore(&ihost->scic_lock, flags);
222  if (!wait_event_timeout(ihost->eventq,
223  isci_check_reqterm(ihost, idev, ireq,
224  rnc_suspend_count),
226 
227  dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
228  __func__, ihost->id);
229  dev_dbg(&ihost->pdev->dev,
230  "%s: ******* Timeout waiting for "
231  "suspend; idev=%p, current state %s; "
232  "started_request_count=%d, flags=%lx\n\t"
233  "rnc_suspend_count=%d, rnc.suspend_count=%d "
234  "RNC: current state %s, current "
235  "suspend_type %x dest state %d;\n"
236  "ireq=%p, ireq->flags = %lx\n",
237  __func__, idev,
238  dev_state_name(idev->sm.current_state_id),
239  idev->started_request_count, idev->flags,
240  rnc_suspend_count, idev->rnc.suspend_count,
241  rnc_state_name(idev->rnc.sm.current_state_id),
242  idev->rnc.suspend_type,
243  idev->rnc.destination_state,
244  ireq, ireq->flags);
245  }
246  spin_lock_irqsave(&ihost->scic_lock, flags);
248  if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
249  isci_free_tag(ihost, ireq->io_tag);
250  spin_unlock_irqrestore(&ihost->scic_lock, flags);
251  } else {
252  /* Terminate all TCs. */
254  spin_unlock_irqrestore(&ihost->scic_lock, flags);
255  if (!wait_event_timeout(ihost->eventq,
256  isci_check_devempty(ihost, idev,
257  rnc_suspend_count),
259 
260  dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
261  __func__, ihost->id);
262  dev_dbg(&ihost->pdev->dev,
263  "%s: ******* Timeout waiting for "
264  "suspend; idev=%p, current state %s; "
265  "started_request_count=%d, flags=%lx\n\t"
266  "rnc_suspend_count=%d, "
267  "RNC: current state %s, "
268  "rnc.suspend_count=%d, current "
269  "suspend_type %x dest state %d\n",
270  __func__, idev,
271  dev_state_name(idev->sm.current_state_id),
272  idev->started_request_count, idev->flags,
273  rnc_suspend_count,
274  rnc_state_name(idev->rnc.sm.current_state_id),
275  idev->rnc.suspend_count,
276  idev->rnc.suspend_type,
277  idev->rnc.destination_state);
278  }
279  }
280  dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
281  __func__, idev);
282  isci_put_device(idev);
283  }
284  return status;
285 }
286 
296 static void isci_remote_device_not_ready(struct isci_host *ihost,
297  struct isci_remote_device *idev,
298  u32 reason)
299 {
300  dev_dbg(&ihost->pdev->dev,
301  "%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
302 
303  switch (reason) {
305  set_bit(IDEV_IO_NCQERROR, &idev->flags);
306 
307  /* Suspend the remote device so the I/O can be terminated. */
309 
310  /* Kill all outstanding requests for the device. */
312 
313  /* Fall through into the default case... */
314  default:
315  clear_bit(IDEV_IO_READY, &idev->flags);
316  break;
317  }
318 }
319 
320 /* called once the remote node context is ready to be freed.
321  * The remote device can now report that its stop operation is complete. none
322  */
static void rnc_destruct_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	/* No request may still be started once RNC destruction finishes. */
	BUG_ON(idev->started_request_count != 0);
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}
330 
332  struct isci_remote_device *idev)
333 {
334  return sci_remote_device_terminate_reqs_checkabort(idev, 0);
335 }
336 
338  u32 timeout)
339 {
340  struct sci_base_state_machine *sm = &idev->sm;
341  enum sci_remote_device_states state = sm->current_state_id;
342 
343  switch (state) {
344  case SCI_DEV_INITIAL:
345  case SCI_DEV_FAILED:
346  case SCI_DEV_FINAL:
347  default:
348  dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
349  __func__, dev_state_name(state));
351  case SCI_DEV_STOPPED:
352  return SCI_SUCCESS;
353  case SCI_DEV_STARTING:
354  /* device not started so there had better be no requests */
355  BUG_ON(idev->started_request_count != 0);
357  rnc_destruct_done, idev);
358  /* Transition to the stopping state and wait for the
359  * remote node to complete being posted and invalidated.
360  */
361  sci_change_state(sm, SCI_DEV_STOPPING);
362  return SCI_SUCCESS;
363  case SCI_DEV_READY:
364  case SCI_STP_DEV_IDLE:
365  case SCI_STP_DEV_CMD:
366  case SCI_STP_DEV_NCQ:
367  case SCI_STP_DEV_NCQ_ERROR:
368  case SCI_STP_DEV_AWAIT_RESET:
369  case SCI_SMP_DEV_IDLE:
370  case SCI_SMP_DEV_CMD:
371  sci_change_state(sm, SCI_DEV_STOPPING);
372  if (idev->started_request_count == 0)
374  rnc_destruct_done,
375  idev);
376  else {
380  }
381  return SCI_SUCCESS;
382  case SCI_DEV_STOPPING:
383  /* All requests should have been terminated, but if there is an
384  * attempt to stop a device already in the stopping state, then
385  * try again to terminate.
386  */
388  case SCI_DEV_RESETTING:
389  sci_change_state(sm, SCI_DEV_STOPPING);
390  return SCI_SUCCESS;
391  }
392 }
393 
395 {
396  struct sci_base_state_machine *sm = &idev->sm;
397  enum sci_remote_device_states state = sm->current_state_id;
398 
399  switch (state) {
400  case SCI_DEV_INITIAL:
401  case SCI_DEV_STOPPED:
402  case SCI_DEV_STARTING:
403  case SCI_SMP_DEV_IDLE:
404  case SCI_SMP_DEV_CMD:
405  case SCI_DEV_STOPPING:
406  case SCI_DEV_FAILED:
407  case SCI_DEV_RESETTING:
408  case SCI_DEV_FINAL:
409  default:
410  dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
411  __func__, dev_state_name(state));
413  case SCI_DEV_READY:
414  case SCI_STP_DEV_IDLE:
415  case SCI_STP_DEV_CMD:
416  case SCI_STP_DEV_NCQ:
417  case SCI_STP_DEV_NCQ_ERROR:
418  case SCI_STP_DEV_AWAIT_RESET:
419  sci_change_state(sm, SCI_DEV_RESETTING);
420  return SCI_SUCCESS;
421  }
422 }
423 
425 {
426  struct sci_base_state_machine *sm = &idev->sm;
427  enum sci_remote_device_states state = sm->current_state_id;
428 
429  if (state != SCI_DEV_RESETTING) {
430  dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
431  __func__, dev_state_name(state));
433  }
434 
435  sci_change_state(sm, SCI_DEV_READY);
436  return SCI_SUCCESS;
437 }
438 
441 {
442  struct sci_base_state_machine *sm = &idev->sm;
443  enum sci_remote_device_states state = sm->current_state_id;
444  struct isci_host *ihost = idev->owning_port->owning_controller;
445  enum sci_status status;
446 
447  switch (state) {
448  case SCI_DEV_INITIAL:
449  case SCI_DEV_STOPPED:
450  case SCI_DEV_STARTING:
451  case SCI_STP_DEV_IDLE:
452  case SCI_SMP_DEV_IDLE:
453  case SCI_DEV_FINAL:
454  default:
455  dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
456  __func__, dev_state_name(state));
457  /* Return the frame back to the controller */
458  sci_controller_release_frame(ihost, frame_index);
460  case SCI_DEV_READY:
461  case SCI_STP_DEV_NCQ_ERROR:
462  case SCI_STP_DEV_AWAIT_RESET:
463  case SCI_DEV_STOPPING:
464  case SCI_DEV_FAILED:
465  case SCI_DEV_RESETTING: {
466  struct isci_request *ireq;
467  struct ssp_frame_hdr hdr;
468  void *frame_header;
469  ssize_t word_cnt;
470 
472  frame_index,
473  &frame_header);
474  if (status != SCI_SUCCESS)
475  return status;
476 
477  word_cnt = sizeof(hdr) / sizeof(u32);
478  sci_swab32_cpy(&hdr, frame_header, word_cnt);
479 
480  ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
481  if (ireq && ireq->target_device == idev) {
482  /* The IO request is now in charge of releasing the frame */
483  status = sci_io_request_frame_handler(ireq, frame_index);
484  } else {
485  /* We could not map this tag to a valid IO
486  * request Just toss the frame and continue
487  */
488  sci_controller_release_frame(ihost, frame_index);
489  }
490  break;
491  }
492  case SCI_STP_DEV_NCQ: {
493  struct dev_to_host_fis *hdr;
494 
496  frame_index,
497  (void **)&hdr);
498  if (status != SCI_SUCCESS)
499  return status;
500 
501  if (hdr->fis_type == FIS_SETDEVBITS &&
502  (hdr->status & ATA_ERR)) {
504 
505  /* TODO Check sactive and complete associated IO if any. */
506  sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
507  } else if (hdr->fis_type == FIS_REGD2H &&
508  (hdr->status & ATA_ERR)) {
509  /*
510  * Some devices return D2H FIS when an NCQ error is detected.
511  * Treat this like an SDB error FIS ready reason.
512  */
514  sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
515  } else
516  status = SCI_FAILURE;
517 
518  sci_controller_release_frame(ihost, frame_index);
519  break;
520  }
521  case SCI_STP_DEV_CMD:
522  case SCI_SMP_DEV_CMD:
523  /* The device does not process any UF received from the hardware while
524  * in this state. All unsolicited frames are forwarded to the io request
525  * object.
526  */
527  status = sci_io_request_frame_handler(idev->working_request, frame_index);
528  break;
529  }
530 
531  return status;
532 }
533 
534 static bool is_remote_device_ready(struct isci_remote_device *idev)
535 {
536 
537  struct sci_base_state_machine *sm = &idev->sm;
538  enum sci_remote_device_states state = sm->current_state_id;
539 
540  switch (state) {
541  case SCI_DEV_READY:
542  case SCI_STP_DEV_IDLE:
543  case SCI_STP_DEV_CMD:
544  case SCI_STP_DEV_NCQ:
545  case SCI_STP_DEV_NCQ_ERROR:
546  case SCI_STP_DEV_AWAIT_RESET:
547  case SCI_SMP_DEV_IDLE:
548  case SCI_SMP_DEV_CMD:
549  return true;
550  default:
551  return false;
552  }
553 }
554 
555 /*
556  * called once the remote node context has transisitioned to a ready
557  * state (after suspending RX and/or TX due to early D2H fis)
558  */
static void atapi_remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_request *ireq = idev->working_request;

	/* The RNC has resumed; complete the in-flight working request. */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}
566 
568  u32 event_code)
569 {
570  enum sci_status status;
571  struct sci_base_state_machine *sm = &idev->sm;
572  enum sci_remote_device_states state = sm->current_state_id;
573 
574  switch (scu_get_event_type(event_code)) {
578  status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
579  break;
581  if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
582  status = SCI_SUCCESS;
583 
584  /* Suspend the associated RNC */
586 
587  dev_dbg(scirdev_to_dev(idev),
588  "%s: device: %p event code: %x: %s\n",
589  __func__, idev, event_code,
590  is_remote_device_ready(idev)
591  ? "I_T_Nexus_Timeout event"
592  : "I_T_Nexus_Timeout event in wrong state");
593 
594  break;
595  }
596  /* Else, fall through and treat as unhandled... */
597  default:
598  dev_dbg(scirdev_to_dev(idev),
599  "%s: device: %p event code: %x: %s\n",
600  __func__, idev, event_code,
601  is_remote_device_ready(idev)
602  ? "unexpected event"
603  : "unexpected event in wrong state");
604  status = SCI_FAILURE_INVALID_STATE;
605  break;
606  }
607 
608  if (status != SCI_SUCCESS)
609  return status;
610 
611  /* Decode device-specific states that may require an RNC resume during
612  * normal operation. When the abort path is active, these resumes are
613  * managed when the abort path exits.
614  */
615  if (state == SCI_STP_DEV_ATAPI_ERROR) {
616  /* For ATAPI error state resume the RNC right away. */
619  return sci_remote_node_context_resume(&idev->rnc,
620  atapi_remote_device_resume_done,
621  idev);
622  }
623  }
624 
625  if (state == SCI_STP_DEV_IDLE) {
626 
627  /* We pick up suspension events to handle specifically to this
628  * state. We resume the RNC right away.
629  */
632  status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
633  }
634 
635  return status;
636 }
637 
638 static void sci_remote_device_start_request(struct isci_remote_device *idev,
639  struct isci_request *ireq,
640  enum sci_status status)
641 {
642  struct isci_port *iport = idev->owning_port;
643 
644  /* cleanup requests that failed after starting on the port */
645  if (status != SCI_SUCCESS)
646  sci_port_complete_io(iport, idev, ireq);
647  else {
648  kref_get(&idev->kref);
649  idev->started_request_count++;
650  }
651 }
652 
654  struct isci_remote_device *idev,
655  struct isci_request *ireq)
656 {
657  struct sci_base_state_machine *sm = &idev->sm;
658  enum sci_remote_device_states state = sm->current_state_id;
659  struct isci_port *iport = idev->owning_port;
660  enum sci_status status;
661 
662  switch (state) {
663  case SCI_DEV_INITIAL:
664  case SCI_DEV_STOPPED:
665  case SCI_DEV_STARTING:
666  case SCI_STP_DEV_NCQ_ERROR:
667  case SCI_DEV_STOPPING:
668  case SCI_DEV_FAILED:
669  case SCI_DEV_RESETTING:
670  case SCI_DEV_FINAL:
671  default:
672  dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
673  __func__, dev_state_name(state));
675  case SCI_DEV_READY:
676  /* attempt to start an io request for this device object. The remote
677  * device object will issue the start request for the io and if
678  * successful it will start the request for the port object then
679  * increment its own request count.
680  */
681  status = sci_port_start_io(iport, idev, ireq);
682  if (status != SCI_SUCCESS)
683  return status;
684 
685  status = sci_remote_node_context_start_io(&idev->rnc, ireq);
686  if (status != SCI_SUCCESS)
687  break;
688 
689  status = sci_request_start(ireq);
690  break;
691  case SCI_STP_DEV_IDLE: {
692  /* handle the start io operation for a sata device that is in
693  * the command idle state. - Evalute the type of IO request to
694  * be started - If its an NCQ request change to NCQ substate -
695  * If its any other command change to the CMD substate
696  *
697  * If this is a softreset we may want to have a different
698  * substate.
699  */
700  enum sci_remote_device_states new_state;
701  struct sas_task *task = isci_request_access_task(ireq);
702 
703  status = sci_port_start_io(iport, idev, ireq);
704  if (status != SCI_SUCCESS)
705  return status;
706 
707  status = sci_remote_node_context_start_io(&idev->rnc, ireq);
708  if (status != SCI_SUCCESS)
709  break;
710 
711  status = sci_request_start(ireq);
712  if (status != SCI_SUCCESS)
713  break;
714 
715  if (task->ata_task.use_ncq)
716  new_state = SCI_STP_DEV_NCQ;
717  else {
718  idev->working_request = ireq;
719  new_state = SCI_STP_DEV_CMD;
720  }
721  sci_change_state(sm, new_state);
722  break;
723  }
724  case SCI_STP_DEV_NCQ: {
725  struct sas_task *task = isci_request_access_task(ireq);
726 
727  if (task->ata_task.use_ncq) {
728  status = sci_port_start_io(iport, idev, ireq);
729  if (status != SCI_SUCCESS)
730  return status;
731 
732  status = sci_remote_node_context_start_io(&idev->rnc, ireq);
733  if (status != SCI_SUCCESS)
734  break;
735 
736  status = sci_request_start(ireq);
737  } else
739  break;
740  }
741  case SCI_STP_DEV_AWAIT_RESET:
743  case SCI_SMP_DEV_IDLE:
744  status = sci_port_start_io(iport, idev, ireq);
745  if (status != SCI_SUCCESS)
746  return status;
747 
748  status = sci_remote_node_context_start_io(&idev->rnc, ireq);
749  if (status != SCI_SUCCESS)
750  break;
751 
752  status = sci_request_start(ireq);
753  if (status != SCI_SUCCESS)
754  break;
755 
756  idev->working_request = ireq;
757  sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
758  break;
759  case SCI_STP_DEV_CMD:
760  case SCI_SMP_DEV_CMD:
761  /* device is already handling a command it can not accept new commands
762  * until this one is complete.
763  */
765  }
766 
767  sci_remote_device_start_request(idev, ireq, status);
768  return status;
769 }
770 
771 static enum sci_status common_complete_io(struct isci_port *iport,
772  struct isci_remote_device *idev,
773  struct isci_request *ireq)
774 {
775  enum sci_status status;
776 
777  status = sci_request_complete(ireq);
778  if (status != SCI_SUCCESS)
779  return status;
780 
781  status = sci_port_complete_io(iport, idev, ireq);
782  if (status != SCI_SUCCESS)
783  return status;
784 
785  sci_remote_device_decrement_request_count(idev);
786  return status;
787 }
788 
790  struct isci_remote_device *idev,
791  struct isci_request *ireq)
792 {
793  struct sci_base_state_machine *sm = &idev->sm;
794  enum sci_remote_device_states state = sm->current_state_id;
795  struct isci_port *iport = idev->owning_port;
796  enum sci_status status;
797 
798  switch (state) {
799  case SCI_DEV_INITIAL:
800  case SCI_DEV_STOPPED:
801  case SCI_DEV_STARTING:
802  case SCI_STP_DEV_IDLE:
803  case SCI_SMP_DEV_IDLE:
804  case SCI_DEV_FAILED:
805  case SCI_DEV_FINAL:
806  default:
807  dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
808  __func__, dev_state_name(state));
810  case SCI_DEV_READY:
811  case SCI_STP_DEV_AWAIT_RESET:
812  case SCI_DEV_RESETTING:
813  status = common_complete_io(iport, idev, ireq);
814  break;
815  case SCI_STP_DEV_CMD:
816  case SCI_STP_DEV_NCQ:
817  case SCI_STP_DEV_NCQ_ERROR:
818  case SCI_STP_DEV_ATAPI_ERROR:
819  status = common_complete_io(iport, idev, ireq);
820  if (status != SCI_SUCCESS)
821  break;
822 
824  /* This request causes hardware error, device needs to be Lun Reset.
825  * So here we force the state machine to IDLE state so the rest IOs
826  * can reach RNC state handler, these IOs will be completed by RNC with
827  * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE".
828  */
829  sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
830  } else if (idev->started_request_count == 0)
831  sci_change_state(sm, SCI_STP_DEV_IDLE);
832  break;
833  case SCI_SMP_DEV_CMD:
834  status = common_complete_io(iport, idev, ireq);
835  if (status != SCI_SUCCESS)
836  break;
837  sci_change_state(sm, SCI_SMP_DEV_IDLE);
838  break;
839  case SCI_DEV_STOPPING:
840  status = common_complete_io(iport, idev, ireq);
841  if (status != SCI_SUCCESS)
842  break;
843 
844  if (idev->started_request_count == 0)
846  rnc_destruct_done,
847  idev);
848  break;
849  }
850 
851  if (status != SCI_SUCCESS)
852  dev_err(scirdev_to_dev(idev),
853  "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
854  "could not complete\n", __func__, iport,
855  idev, ireq, status);
856  else
857  isci_put_device(idev);
858 
859  return status;
860 }
861 
862 static void sci_remote_device_continue_request(void *dev)
863 {
864  struct isci_remote_device *idev = dev;
865 
866  /* we need to check if this request is still valid to continue. */
867  if (idev->working_request)
869 }
870 
872  struct isci_remote_device *idev,
873  struct isci_request *ireq)
874 {
875  struct sci_base_state_machine *sm = &idev->sm;
876  enum sci_remote_device_states state = sm->current_state_id;
877  struct isci_port *iport = idev->owning_port;
878  enum sci_status status;
879 
880  switch (state) {
881  case SCI_DEV_INITIAL:
882  case SCI_DEV_STOPPED:
883  case SCI_DEV_STARTING:
884  case SCI_SMP_DEV_IDLE:
885  case SCI_SMP_DEV_CMD:
886  case SCI_DEV_STOPPING:
887  case SCI_DEV_FAILED:
888  case SCI_DEV_RESETTING:
889  case SCI_DEV_FINAL:
890  default:
891  dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
892  __func__, dev_state_name(state));
894  case SCI_STP_DEV_IDLE:
895  case SCI_STP_DEV_CMD:
896  case SCI_STP_DEV_NCQ:
897  case SCI_STP_DEV_NCQ_ERROR:
898  case SCI_STP_DEV_AWAIT_RESET:
899  status = sci_port_start_io(iport, idev, ireq);
900  if (status != SCI_SUCCESS)
901  return status;
902 
903  status = sci_request_start(ireq);
904  if (status != SCI_SUCCESS)
905  goto out;
906 
907  /* Note: If the remote device state is not IDLE this will
908  * replace the request that probably resulted in the task
909  * management request.
910  */
911  idev->working_request = ireq;
912  sci_change_state(sm, SCI_STP_DEV_CMD);
913 
914  /* The remote node context must cleanup the TCi to NCQ mapping
915  * table. The only way to do this correctly is to either write
916  * to the TLCR register or to invalidate and repost the RNC. In
917  * either case the remote node context state machine will take
918  * the correct action when the remote node context is suspended
919  * and later resumed.
920  */
923 
924  status = sci_remote_node_context_start_task(&idev->rnc, ireq,
925  sci_remote_device_continue_request, idev);
926 
927  out:
928  sci_remote_device_start_request(idev, ireq, status);
929  /* We need to let the controller start request handler know that
930  * it can't post TC yet. We will provide a callback function to
931  * post TC when RNC gets resumed.
932  */
934  case SCI_DEV_READY:
935  status = sci_port_start_io(iport, idev, ireq);
936  if (status != SCI_SUCCESS)
937  return status;
938 
939  /* Resume the RNC as needed: */
940  status = sci_remote_node_context_start_task(&idev->rnc, ireq,
941  NULL, NULL);
942  if (status != SCI_SUCCESS)
943  break;
944 
945  status = sci_request_start(ireq);
946  break;
947  }
948  sci_remote_device_start_request(idev, ireq, status);
949 
950  return status;
951 }
952 
954 {
955  struct isci_port *iport = idev->owning_port;
956  u32 context;
957 
958  context = request |
961  idev->rnc.remote_node_index;
962 
964 }
965 
966 /* called once the remote node context has transisitioned to a
967  * ready state. This is the indication that the remote device object can also
968  * transition to ready.
969  */
970 static void remote_device_resume_done(void *_dev)
971 {
972  struct isci_remote_device *idev = _dev;
973 
974  if (is_remote_device_ready(idev))
975  return;
976 
977  /* go 'ready' if we are not already in a ready state */
978  sci_change_state(&idev->sm, SCI_DEV_READY);
979 }
980 
/* RNC resume callback for the STP idle substate: notify the host that
 * the device is ready, except when we arrived from the NCQ substate.
 */
static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* For NCQ operation we do not issue a isci_remote_device_not_ready().
	 * As a result, avoid sending the ready notification.
	 */
	if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
		isci_remote_device_ready(ihost, idev);
}
992 
/* State-machine entry action for SCI_DEV_INITIAL. */
static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	/* Initial state is a transitional state to the stopped state */
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}
1000 
1014 static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
1015 {
1016  struct sci_base_state_machine *sm = &idev->sm;
1017  enum sci_remote_device_states state = sm->current_state_id;
1018  struct isci_host *ihost;
1019 
1020  if (state != SCI_DEV_STOPPED) {
1021  dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
1022  __func__, dev_state_name(state));
1024  }
1025 
1026  ihost = idev->owning_port->owning_controller;
1028  idev->rnc.remote_node_index);
1029  idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
1030  sci_change_state(sm, SCI_DEV_FINAL);
1031 
1032  return SCI_SUCCESS;
1033 }
1034 
/* Tear down a stopped remote device: destruct the SCI-level object,
 * unlink it from the host's device list, and drop a reference.
 */
static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	/* There should not be any outstanding io's. All paths to
	 * here should go through isci_remote_device_nuke_requests.
	 * If we hit this condition, we will need a way to complete
	 * io requests in process */
	BUG_ON(idev->started_request_count > 0);

	sci_remote_device_destruct(idev);
	list_del_init(&idev->node);
	isci_put_device(idev);
}
1056 
1057 static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
1058 {
1059  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1060  struct isci_host *ihost = idev->owning_port->owning_controller;
1061  u32 prev_state;
1062 
1063  /* If we are entering from the stopping state let the SCI User know that
1064  * the stop operation has completed.
1065  */
1066  prev_state = idev->sm.previous_state_id;
1067  if (prev_state == SCI_DEV_STOPPING)
1068  isci_remote_device_deconstruct(ihost, idev);
1069 
1071 }
1072 
1073 static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
1074 {
1075  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1076  struct isci_host *ihost = idev->owning_port->owning_controller;
1077 
1078  isci_remote_device_not_ready(ihost, idev,
1080 }
1081 
/* Entry action for SCI_DEV_READY: SATA/STP and expander (SMP) devices
 * are routed into their protocol-specific idle substates; any other
 * device is reported ready to the host immediately.
 */
static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
	} else if (dev_is_expander(dev)) {
		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
	} else
		isci_remote_device_ready(ihost, idev);
}
1095 
1096 static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
1097 {
1098  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1099  struct domain_device *dev = idev->domain_dev;
1100 
1101  if (dev->dev_type == SAS_END_DEV) {
1102  struct isci_host *ihost = idev->owning_port->owning_controller;
1103 
1104  isci_remote_device_not_ready(ihost, idev,
1106  }
1107 }
1108 
1109 static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
1110 {
1111  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1112  struct isci_host *ihost = idev->owning_port->owning_controller;
1113 
1114  dev_dbg(&ihost->pdev->dev,
1115  "%s: isci_device = %p\n", __func__, idev);
1116 
1118 }
1119 
1120 static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
1121 {
1122  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1123  struct isci_host *ihost = idev->owning_port->owning_controller;
1124 
1125  dev_dbg(&ihost->pdev->dev,
1126  "%s: isci_device = %p\n", __func__, idev);
1127 
1129 }
1130 
1131 static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
1132 {
1133  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1134 
1135  idev->working_request = NULL;
1136  if (sci_remote_node_context_is_ready(&idev->rnc)) {
1137  /*
1138  * Since the RNC is ready, it's alright to finish completion
1139  * processing (e.g. signal the remote device is ready). */
1140  sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
1141  } else {
1143  sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
1144  idev);
1145  }
1146 }
1147 
1148 static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
1149 {
1150  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1151  struct isci_host *ihost = idev->owning_port->owning_controller;
1152 
1153  BUG_ON(idev->working_request == NULL);
1154 
1155  isci_remote_device_not_ready(ihost, idev,
1157 }
1158 
1159 static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
1160 {
1161  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1162  struct isci_host *ihost = idev->owning_port->owning_controller;
1163 
1165  isci_remote_device_not_ready(ihost, idev,
1166  idev->not_ready_reason);
1167 }
1168 
1169 static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
1170 {
1171  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1172  struct isci_host *ihost = idev->owning_port->owning_controller;
1173 
1174  isci_remote_device_ready(ihost, idev);
1175 }
1176 
1177 static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
1178 {
1179  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1180  struct isci_host *ihost = idev->owning_port->owning_controller;
1181 
1182  BUG_ON(idev->working_request == NULL);
1183 
1184  isci_remote_device_not_ready(ihost, idev,
1186 }
1187 
1188 static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
1189 {
1190  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1191 
1192  idev->working_request = NULL;
1193 }
1194 
/* Remote-device state table, indexed by enum sci_remote_device_states.
 * Entries with an empty initializer are valid states that need no
 * enter/exit action.  Indexed with designated initializers, so order
 * here mirrors (but does not determine) the enum.
 */
static const struct sci_base_state sci_remote_device_state_table[] = {
	[SCI_DEV_INITIAL] = {
		.enter_state = sci_remote_device_initial_state_enter,
	},
	[SCI_DEV_STOPPED] = {
		.enter_state = sci_remote_device_stopped_state_enter,
	},
	[SCI_DEV_STARTING] = {
		.enter_state = sci_remote_device_starting_state_enter,
	},
	[SCI_DEV_READY] = {
		.enter_state = sci_remote_device_ready_state_enter,
		.exit_state  = sci_remote_device_ready_state_exit
	},
	[SCI_STP_DEV_IDLE] = {
		.enter_state = sci_stp_remote_device_ready_idle_substate_enter,
	},
	[SCI_STP_DEV_CMD] = {
		.enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
	},
	[SCI_STP_DEV_NCQ] = { },
	[SCI_STP_DEV_NCQ_ERROR] = {
		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
	},
	[SCI_STP_DEV_ATAPI_ERROR] = { },
	[SCI_STP_DEV_AWAIT_RESET] = { },
	[SCI_SMP_DEV_IDLE] = {
		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
	},
	[SCI_SMP_DEV_CMD] = {
		.enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
		.exit_state  = sci_smp_remote_device_ready_cmd_substate_exit,
	},
	[SCI_DEV_STOPPING] = { },
	[SCI_DEV_FAILED] = { },
	[SCI_DEV_RESETTING] = {
		.enter_state = sci_remote_device_resetting_state_enter,
		.exit_state  = sci_remote_device_resetting_state_exit
	},
	[SCI_DEV_FINAL] = { },
};
1236 
1247 static void sci_remote_device_construct(struct isci_port *iport,
1248  struct isci_remote_device *idev)
1249 {
1250  idev->owning_port = iport;
1251  idev->started_request_count = 0;
1252 
1253  sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
1254 
1257 }
1258 
1273 static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1274  struct isci_remote_device *idev)
1275 {
1276  enum sci_status status;
1277  struct sci_port_properties properties;
1278 
1279  sci_remote_device_construct(iport, idev);
1280 
1281  sci_port_get_properties(iport, &properties);
1282  /* Get accurate port width from port's phy mask for a DA device. */
1283  idev->device_port_width = hweight32(properties.phy_mask);
1284 
1286  idev,
1287  &idev->rnc.remote_node_index);
1288 
1289  if (status != SCI_SUCCESS)
1290  return status;
1291 
1293 
1294  return SCI_SUCCESS;
1295 }
1296 
1309 static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1310  struct isci_remote_device *idev)
1311 {
1312  struct domain_device *dev = idev->domain_dev;
1313  enum sci_status status;
1314 
1315  sci_remote_device_construct(iport, idev);
1316 
1318  idev,
1319  &idev->rnc.remote_node_index);
1320  if (status != SCI_SUCCESS)
1321  return status;
1322 
1323  /* For SAS-2 the physical link rate is actually a logical link
1324  * rate that incorporates multiplexing. The SCU doesn't
1325  * incorporate multiplexing and for the purposes of the
1326  * connection the logical link rate is that same as the
1327  * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
1328  * one another, so this code works for both situations.
1329  */
1331  dev->linkrate);
1332 
1333  /* / @todo Should I assign the port width by reading all of the phys on the port? */
1334  idev->device_port_width = 1;
1335 
1336  return SCI_SUCCESS;
1337 }
1338 
1340  struct isci_remote_device *idev,
1342  void *cb_p)
1343 {
1344  enum sci_status status;
1345 
1346  status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
1347  if (status != SCI_SUCCESS)
1348  dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
1349  __func__, status);
1350  return status;
1351 }
1352 
1353 static void isci_remote_device_resume_from_abort_complete(void *cbparam)
1354 {
1355  struct isci_remote_device *idev = cbparam;
1356  struct isci_host *ihost = idev->owning_port->owning_controller;
1357  scics_sds_remote_node_context_callback abort_resume_cb =
1358  idev->abort_resume_cb;
1359 
1360  dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
1361  __func__, abort_resume_cb);
1362 
1363  if (abort_resume_cb != NULL) {
1364  idev->abort_resume_cb = NULL;
1365  abort_resume_cb(idev->abort_resume_cbparam);
1366  }
1368  wake_up(&ihost->eventq);
1369 }
1370 
1371 static bool isci_remote_device_test_resume_done(
1372  struct isci_host *ihost,
1373  struct isci_remote_device *idev)
1374 {
1375  unsigned long flags;
1376  bool done;
1377 
1378  spin_lock_irqsave(&ihost->scic_lock, flags);
1380  || test_bit(IDEV_STOP_PENDING, &idev->flags)
1381  || sci_remote_node_context_is_being_destroyed(&idev->rnc);
1382  spin_unlock_irqrestore(&ihost->scic_lock, flags);
1383 
1384  return done;
1385 }
1386 
1388  struct isci_host *ihost,
1389  struct isci_remote_device *idev)
1390 {
1391  dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
1392  __func__, idev);
1393 
1394  #define MAX_RESUME_MSECS 10000
1395  if (!wait_event_timeout(ihost->eventq,
1396  isci_remote_device_test_resume_done(ihost, idev),
1398 
1399  dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
1400  "resume: %p\n", __func__, idev);
1401  }
1403 
1404  dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
1405  __func__, idev);
1406 }
1407 
1409  struct isci_host *ihost,
1410  struct isci_remote_device *idev)
1411 {
1412  unsigned long flags;
1413  enum sci_status status = SCI_SUCCESS;
1414  int destroyed;
1415 
1416  spin_lock_irqsave(&ihost->scic_lock, flags);
1417  /* Preserve any current resume callbacks, for instance from other
1418  * resumptions.
1419  */
1420  idev->abort_resume_cb = idev->rnc.user_callback;
1421  idev->abort_resume_cbparam = idev->rnc.user_cookie;
1424  destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
1425  if (!destroyed)
1426  status = sci_remote_device_resume(
1427  idev, isci_remote_device_resume_from_abort_complete,
1428  idev);
1429  spin_unlock_irqrestore(&ihost->scic_lock, flags);
1430  if (!destroyed && (status == SCI_SUCCESS))
1432  else
1434 
1435  return status;
1436 }
1437 
1451 static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1452  u32 timeout)
1453 {
1454  struct sci_base_state_machine *sm = &idev->sm;
1455  enum sci_remote_device_states state = sm->current_state_id;
1456  enum sci_status status;
1457 
1458  if (state != SCI_DEV_STOPPED) {
1459  dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
1460  __func__, dev_state_name(state));
1462  }
1463 
1464  status = sci_remote_device_resume(idev, remote_device_resume_done,
1465  idev);
1466  if (status != SCI_SUCCESS)
1467  return status;
1468 
1469  sci_change_state(sm, SCI_DEV_STARTING);
1470 
1471  return SCI_SUCCESS;
1472 }
1473 
1474 static enum sci_status isci_remote_device_construct(struct isci_port *iport,
1475  struct isci_remote_device *idev)
1476 {
1477  struct isci_host *ihost = iport->isci_host;
1478  struct domain_device *dev = idev->domain_dev;
1479  enum sci_status status;
1480 
1481  if (dev->parent && dev_is_expander(dev->parent))
1482  status = sci_remote_device_ea_construct(iport, idev);
1483  else
1484  status = sci_remote_device_da_construct(iport, idev);
1485 
1486  if (status != SCI_SUCCESS) {
1487  dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
1488  __func__, status);
1489 
1490  return status;
1491  }
1492 
1493  /* start the device. */
1494  status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
1495 
1496  if (status != SCI_SUCCESS)
1497  dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
1498  status);
1499 
1500  return status;
1501 }
1502 
1511 static struct isci_remote_device *
1512 isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
1513 {
1514  struct isci_remote_device *idev;
1515  int i;
1516 
1517  for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
1518  idev = &ihost->devices[i];
1519  if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
1520  break;
1521  }
1522 
1523  if (i >= SCI_MAX_REMOTE_DEVICES) {
1524  dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
1525  return NULL;
1526  }
1527  if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
1528  return NULL;
1529 
1530  return idev;
1531 }
1532 
1534 {
1535  struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
1536  struct isci_host *ihost = idev->isci_port->isci_host;
1537 
1538  idev->domain_dev = NULL;
1539  idev->isci_port = NULL;
1542  clear_bit(IDEV_IO_READY, &idev->flags);
1543  clear_bit(IDEV_GONE, &idev->flags);
1545  clear_bit(IDEV_ALLOCATED, &idev->flags);
1546  wake_up(&ihost->eventq);
1547 }
1548 
1558 {
1559  enum sci_status status;
1560  unsigned long flags;
1561 
1562  dev_dbg(&ihost->pdev->dev,
1563  "%s: isci_device = %p\n", __func__, idev);
1564 
1565  spin_lock_irqsave(&ihost->scic_lock, flags);
1566  idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
1567  set_bit(IDEV_GONE, &idev->flags);
1568 
1569  set_bit(IDEV_STOP_PENDING, &idev->flags);
1570  status = sci_remote_device_stop(idev, 50);
1571  spin_unlock_irqrestore(&ihost->scic_lock, flags);
1572 
1573  /* Wait for the stop complete callback. */
1574  if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
1575  /* nothing to wait for */;
1576  else
1577  wait_for_device_stop(ihost, idev);
1578 
1579  dev_dbg(&ihost->pdev->dev,
1580  "%s: isci_device = %p, waiting done.\n", __func__, idev);
1581 
1582  return status;
1583 }
1584 
1592 {
1593  struct isci_host *ihost = dev_to_ihost(dev);
1594  struct isci_remote_device *idev = dev->lldd_dev;
1595 
1596  dev_dbg(&ihost->pdev->dev,
1597  "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
1598  __func__, dev, idev, idev->isci_port);
1599 
1600  isci_remote_device_stop(ihost, idev);
1601 }
1602 
1603 
1614 {
1615  struct isci_host *isci_host = dev_to_ihost(dev);
1616  struct isci_port *isci_port = dev->port->lldd_port;
1617  struct isci_remote_device *isci_device;
1618  enum sci_status status;
1619 
1620  dev_dbg(&isci_host->pdev->dev,
1621  "%s: domain_device = %p\n", __func__, dev);
1622 
1623  if (!isci_port)
1624  return -ENODEV;
1625 
1626  isci_device = isci_remote_device_alloc(isci_host, isci_port);
1627  if (!isci_device)
1628  return -ENODEV;
1629 
1630  kref_init(&isci_device->kref);
1631  INIT_LIST_HEAD(&isci_device->node);
1632 
1633  spin_lock_irq(&isci_host->scic_lock);
1634  isci_device->domain_dev = dev;
1635  isci_device->isci_port = isci_port;
1636  list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
1637 
1638  set_bit(IDEV_START_PENDING, &isci_device->flags);
1639  status = isci_remote_device_construct(isci_port, isci_device);
1640 
1641  dev_dbg(&isci_host->pdev->dev,
1642  "%s: isci_device = %p\n",
1643  __func__, isci_device);
1644 
1645  if (status == SCI_SUCCESS) {
1646  /* device came up, advertise it to the world */
1647  dev->lldd_dev = isci_device;
1648  } else
1649  isci_put_device(isci_device);
1650  spin_unlock_irq(&isci_host->scic_lock);
1651 
1652  /* wait for the device ready callback. */
1653  wait_for_device_start(isci_host, isci_device);
1654 
1655  return status == SCI_SUCCESS ? 0 : -ENODEV;
1656 }
1657 
1659  struct isci_host *ihost,
1660  struct isci_remote_device *idev,
1661  struct isci_request *ireq)
1662 {
1663  unsigned long flags;
1664  enum sci_status status;
1665 
1666  /* Put the device into suspension. */
1667  spin_lock_irqsave(&ihost->scic_lock, flags);
1670  spin_unlock_irqrestore(&ihost->scic_lock, flags);
1671 
1672  /* Terminate and wait for the completions. */
1673  status = isci_remote_device_terminate_requests(ihost, idev, ireq);
1674  if (status != SCI_SUCCESS)
1675  dev_dbg(&ihost->pdev->dev,
1676  "%s: isci_remote_device_terminate_requests(%p) "
1677  "returned %d!\n",
1678  __func__, idev, status);
1679 
1680  /* NOTE: RNC resumption is left to the caller! */
1681  return status;
1682 }
1683 
1685  struct isci_remote_device *idev)
1686 {
1688 }
1689 
1691  struct isci_remote_device *idev)
1692 {
1693  return sci_remote_device_terminate_reqs_checkabort(idev, 1);
1694 }
1695 
1697  struct isci_host *ihost,
1698  struct isci_remote_device *idev)
1699 {
1700  unsigned long flags;
1701  enum sci_status status;
1702 
1703  spin_lock_irqsave(&ihost->scic_lock, flags);
1704  status = sci_remote_device_reset_complete(idev);
1705  spin_unlock_irqrestore(&ihost->scic_lock, flags);
1706 
1707  return status;
1708 }
1709 
1711  struct isci_remote_device *idev,
1712  u32 timeout)
1713 {
1714  if (dev_is_sata(idev->domain_dev)) {
1715  if (timeout) {
1717  &idev->flags))
1718  return; /* Already enabled. */
1720  &idev->flags))
1721  return; /* Not enabled. */
1722 
1724  timeout);
1725  }
1726 }