Linux Kernel 3.7.1
host.c (drivers/scsi/isci/host.c)
1 /*
2  * This file is provided under a dual BSD/GPLv2 license. When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  * * Redistributions of source code must retain the above copyright
34  * notice, this list of conditions and the following disclaimer.
35  * * Redistributions in binary form must reproduce the above copyright
36  * notice, this list of conditions and the following disclaimer in
37  * the documentation and/or other materials provided with the
38  * distribution.
39  * * Neither the name of Intel Corporation nor the names of its
40  * contributors may be used to endorse or promote products derived
41  * from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55 #include <linux/circ_buf.h>
56 #include <linux/device.h>
57 #include <scsi/sas.h>
58 #include "host.h"
59 #include "isci.h"
60 #include "port.h"
61 #include "probe_roms.h"
62 #include "remote_device.h"
63 #include "request.h"
64 #include "scu_completion_codes.h"
65 #include "scu_event_codes.h"
66 #include "registers.h"
67 #include "scu_remote_node_context.h"
68 #include "scu_task_context.h"
69 
70 #define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
71 
72 #define smu_max_ports(dcc_value) \
73  (\
74  (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
75  >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
76  )
77 
78 #define smu_max_task_contexts(dcc_value) \
79  (\
80  (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
81  >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
82  )
83 
84 #define smu_max_rncs(dcc_value) \
85  (\
86  (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
87  >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
88  )
89 
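The three smu_max_*() macros above decode fields of the SMU device context capacity register; sci_controller_initialize() (line 2186 below) uses them to size the driver's port, task-context and remote-node pools. A minimal sketch of that use, assuming val holds the raw register value:

    u32 val = readl(&ihost->smu_registers->device_context_capacity);

    ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
    ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
    ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);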
90 #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
91 
99 #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
100 
107 #define NORMALIZE_PUT_POINTER(x) \
108  ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
109 
110 
117 #define NORMALIZE_EVENT_POINTER(x) \
118  (\
119  ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
120  >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
121  )
122 
129 #define NORMALIZE_GET_POINTER(x) \
130  ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
131 
138 #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
139  ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
140 
146 #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
147 
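Taken together, these macros implement the cycle-bit handshake on the hardware completion queue: the controller toggles bit 31 of each entry every time the queue wraps, and the driver keeps the expected cycle bit folded into its cached get pointer, so an entry is valid only while the two bits agree. A minimal sketch of the consumer-side test (the same check sci_controller_completion_queue_has_entries() performs below):

    u32 get = ihost->completion_queue_get;
    u32 idx = NORMALIZE_GET_POINTER(get);

    if (NORMALIZE_GET_POINTER_CYCLE_BIT(get) ==
        COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[idx])) {
            /* the entry at idx is valid and may be consumed */
    }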
148 /* Init the state machine and call the state entry function (if any) */
149 void sci_init_sm(struct sci_base_state_machine *sm,
150  const struct sci_base_state *state_table, u32 initial_state)
151 {
152  sci_state_transition_t handler;
153 
154  sm->initial_state_id = initial_state;
155  sm->previous_state_id = initial_state;
156  sm->current_state_id = initial_state;
157  sm->state_table = state_table;
158 
159  handler = sm->state_table[initial_state].enter_state;
160  if (handler)
161  handler(sm);
162 }
163 
164 /* Call the state exit fn, update the current state, call the state entry fn */
165 void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
166 {
167  sci_state_transition_t handler;
168 
169  handler = sm->state_table[sm->current_state_id].exit_state;
170  if (handler)
171  handler(sm);
172 
173  sm->previous_state_id = sm->current_state_id;
174  sm->current_state_id = next_state;
175 
176  handler = sm->state_table[sm->current_state_id].enter_state;
177  if (handler)
178  handler(sm);
179 }
180 
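These two helpers drive every table-based state machine in the driver: each state maps to optional enter_state/exit_state callbacks in a struct sci_base_state array (see sci_controller_state_table at line 1571 below). A minimal usage sketch, taken from the controller code later in this file:

    sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
    /* ... */
    sci_change_state(&ihost->sm, SCIC_RESET); /* runs exit then enter handlers */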
181 static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
182 {
183  u32 get_value = ihost->completion_queue_get;
184  u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
185 
186  if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
187  COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
188  return true;
189 
190  return false;
191 }
192 
193 static bool sci_controller_isr(struct isci_host *ihost)
194 {
195  if (sci_controller_completion_queue_has_entries(ihost))
196  return true;
197 
198  /* we have a spurious interrupt it could be that we have already
199  * emptied the completion queue from a previous interrupt
200  * FIXME: really!?
201  */
202  writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
203 
204  /* There is a race in the hardware that could cause us not to be
205  * notified of an interrupt completion if we do not take this
206  * step. We will mask then unmask the interrupts so if there is
207  * another interrupt pending the clearing of the interrupt
208  * source we get the next interrupt message.
209  */
210  spin_lock(&ihost->scic_lock);
211  if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
212  writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
213  writel(0, &ihost->smu_registers->interrupt_mask);
214  }
215  spin_unlock(&ihost->scic_lock);
216 
217  return false;
218 }
219 
220 irqreturn_t isci_msix_isr(int vec, void *data)
221 {
222  struct isci_host *ihost = data;
223 
224  if (sci_controller_isr(ihost))
225  tasklet_schedule(&ihost->completion_tasklet);
226 
227  return IRQ_HANDLED;
228 }
229 
230 static bool sci_controller_error_isr(struct isci_host *ihost)
231 {
232  u32 interrupt_status;
233 
234  interrupt_status =
235  readl(&ihost->smu_registers->interrupt_status);
236  interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
237 
238  if (interrupt_status != 0) {
239  /*
240  * There is an error interrupt pending so let it through and handle
241  * in the callback */
242  return true;
243  }
244 
245  /*
246  * There is a race in the hardware that could cause us not to be notified
247  * of an interrupt completion if we do not take this step. We will mask
248  * then unmask the error interrupts so if there was another interrupt
249  * pending we will be notified.
250  * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
251  writel(0xff, &ihost->smu_registers->interrupt_mask);
252  writel(0, &ihost->smu_registers->interrupt_mask);
253 
254  return false;
255 }
256 
257 static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
258 {
259  u32 index = SCU_GET_COMPLETION_INDEX(ent);
260  struct isci_request *ireq = ihost->reqs[index];
261 
262  /* Make sure that we really want to process this IO request */
263  if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
264  ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
265  ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
266  /* Yep this is a valid io request pass it along to the
267  * io request handler
268  */
269  sci_io_request_tc_completion(ireq, ent);
270 }
271 
272 static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
273 {
274  u32 index;
275  struct isci_request *ireq;
276  struct isci_remote_device *idev;
277 
278  index = SCU_GET_COMPLETION_INDEX(ent);
279 
280  switch (scu_get_command_request_type(ent)) {
281  case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
282  case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
283  ireq = ihost->reqs[index];
284  dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
285  __func__, ent, ireq);
286  /* @todo For a post TC operation we need to fail the IO
287  * request
288  */
289  break;
290  case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
291  case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
292  case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
293  idev = ihost->device_table[index];
294  dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
295  __func__, ent, idev);
296  /* @todo For a port RNC operation we need to fail the
297  * device
298  */
299  break;
300  default:
301  dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
302  __func__, ent);
303  break;
304  }
305 }
306 
307 static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
308 {
309  u32 index;
310  u32 frame_index;
311 
312  struct scu_unsolicited_frame_header *frame_header;
313  struct isci_phy *iphy;
314  struct isci_remote_device *idev;
315 
316  enum sci_status result = SCI_FAILURE;
317 
318  frame_index = SCU_GET_FRAME_INDEX(ent);
319 
320  frame_header = ihost->uf_control.buffers.array[frame_index].header;
321  ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
322 
323  if (SCU_GET_FRAME_ERROR(ent)) {
324  /*
325  * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
326  * / this cause a problem? We expect the phy initialization will
327  * / fail if there is an error in the frame. */
328  sci_controller_release_frame(ihost, frame_index);
329  return;
330  }
331 
332  if (frame_header->is_address_frame) {
333  index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
334  iphy = &ihost->phys[index];
335  result = sci_phy_frame_handler(iphy, frame_index);
336  } else {
337 
338  index = SCU_GET_COMPLETION_INDEX(ent);
339 
340  if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
341  /*
342  * This is a signature fis or a frame from a direct attached SATA
343  * device that has not yet been created. In either case forward
344  * the frame to the PE and let it take care of the frame data. */
345  index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
346  iphy = &ihost->phys[index];
347  result = sci_phy_frame_handler(iphy, frame_index);
348  } else {
349  if (index < ihost->remote_node_entries)
350  idev = ihost->device_table[index];
351  else
352  idev = NULL;
353 
354  if (idev != NULL)
355  result = sci_remote_device_frame_handler(idev, frame_index);
356  else
357  sci_controller_release_frame(ihost, frame_index);
358  }
359  }
360 
361  if (result != SCI_SUCCESS) {
362  /*
363  * / @todo Is there any reason to report some additional error message
364  * / when we get this failure notification? */
365  }
366 }
367 
368 static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
369 {
370  struct isci_remote_device *idev;
371  struct isci_request *ireq;
372  struct isci_phy *iphy;
373  u32 index;
374 
375  index = SCU_GET_COMPLETION_INDEX(ent);
376 
377  switch (scu_get_event_type(ent)) {
378  case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
379  /* / @todo The driver did something wrong and we need to fix the condition. */
380  dev_err(&ihost->pdev->dev,
381  "%s: SCIC Controller 0x%p received SMU command error "
382  "0x%x\n",
383  __func__,
384  ihost,
385  ent);
386  break;
387 
388  case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
389  case SCU_EVENT_TYPE_SMU_ERROR:
390  case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
391  /*
392  * / @todo This is a hardware failure and its likely that we want to
393  * / reset the controller. */
394  dev_err(&ihost->pdev->dev,
395  "%s: SCIC Controller 0x%p received fatal controller "
396  "event 0x%x\n",
397  __func__,
398  ihost,
399  ent);
400  break;
401 
402  case SCU_EVENT_TYPE_TRANSPORT_ERROR:
403  ireq = ihost->reqs[index];
404  sci_io_request_event_handler(ireq, ent);
405  break;
406 
407  case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
408  switch (scu_get_event_specifier(ent)) {
409  case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
410  case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
411  ireq = ihost->reqs[index];
412  if (ireq != NULL)
413  sci_io_request_event_handler(ireq, ent);
414  else
415  dev_warn(&ihost->pdev->dev,
416  "%s: SCIC Controller 0x%p received "
417  "event 0x%x for io request object "
418  "that doesnt exist.\n",
419  __func__,
420  ihost,
421  ent);
422 
423  break;
424 
425  case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
426  idev = ihost->device_table[index];
427  if (idev != NULL)
428  sci_remote_device_event_handler(idev, ent);
429  else
430  dev_warn(&ihost->pdev->dev,
431  "%s: SCIC Controller 0x%p received "
432  "event 0x%x for remote device object "
433  "that doesnt exist.\n",
434  __func__,
435  ihost,
436  ent);
437 
438  break;
439  }
440  break;
441 
442  case SCU_EVENT_TYPE_BROADCAST_CHANGE:
443  /*
444  * direct the broadcast change event to the phy first and then let
445  * the phy redirect the broadcast change to the port object */
446  case SCU_EVENT_TYPE_ERR_CNT_EVENT:
447  /*
448  * direct error counter event to the phy object since that is where
449  * we get the event notification. This is a type 4 event. */
450  case SCU_EVENT_TYPE_OSSP_EVENT:
451  index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
452  iphy = &ihost->phys[index];
453  sci_phy_event_handler(iphy, ent);
454  break;
455 
456  case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
457  case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
458  case SCU_EVENT_TYPE_RNC_OPS_MISC:
459  if (index < ihost->remote_node_entries) {
460  idev = ihost->device_table[index];
461 
462  if (idev != NULL)
463  sci_remote_device_event_handler(idev, ent);
464  } else
465  dev_err(&ihost->pdev->dev,
466  "%s: SCIC Controller 0x%p received event 0x%x "
467  "for remote device object 0x%0x that doesnt "
468  "exist.\n",
469  __func__,
470  ihost,
471  ent,
472  index);
473 
474  break;
475 
476  default:
477  dev_warn(&ihost->pdev->dev,
478  "%s: SCIC Controller received unknown event code %x\n",
479  __func__,
480  ent);
481  break;
482  }
483 }
484 
485 static void sci_controller_process_completions(struct isci_host *ihost)
486 {
487  u32 completion_count = 0;
488  u32 ent;
489  u32 get_index;
490  u32 get_cycle;
491  u32 event_get;
492  u32 event_cycle;
493 
494  dev_dbg(&ihost->pdev->dev,
495  "%s: completion queue beginning get:0x%08x\n",
496  __func__,
497  ihost->completion_queue_get);
498 
499  /* Get the component parts of the completion queue */
500  get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
501  get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
502 
503  event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
504  event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
505 
506  while (
507  NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
508  == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
509  ) {
510  completion_count++;
511 
512  ent = ihost->completion_queue[get_index];
513 
514  /* increment the get pointer and check for rollover to toggle the cycle bit */
515  get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
516  (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
517  get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
518 
519  dev_dbg(&ihost->pdev->dev,
520  "%s: completion queue entry:0x%08x\n",
521  __func__,
522  ent);
523 
524  switch (SCU_GET_COMPLETION_TYPE(ent)) {
525  case SCU_COMPLETION_TYPE_TASK:
526  sci_controller_task_completion(ihost, ent);
527  break;
528 
529  case SCU_COMPLETION_TYPE_SDMA:
530  sci_controller_sdma_completion(ihost, ent);
531  break;
532 
533  case SCU_COMPLETION_TYPE_UFRAME:
534  sci_controller_unsolicited_frame(ihost, ent);
535  break;
536 
537  case SCU_COMPLETION_TYPE_EVENT:
538  sci_controller_event_completion(ihost, ent);
539  break;
540 
541  case SCU_COMPLETION_TYPE_NOTIFY: {
542  event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
543  (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
544  event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
545 
546  sci_controller_event_completion(ihost, ent);
547  break;
548  }
549  default:
550  dev_warn(&ihost->pdev->dev,
551  "%s: SCIC Controller received unknown "
552  "completion type %x\n",
553  __func__,
554  ent);
555  break;
556  }
557  }
558 
559  /* Update the get register if we completed one or more entries */
560  if (completion_count > 0) {
561  ihost->completion_queue_get =
562  SMU_CQGR_GEN_BIT(ENABLE) |
563  SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
564  event_cycle |
565  SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
566  get_cycle |
567  SMU_CQGR_GEN_VAL(POINTER, get_index);
568 
569  writel(ihost->completion_queue_get,
570  &ihost->smu_registers->completion_queue_get);
571 
572  }
573 
574  dev_dbg(&ihost->pdev->dev,
575  "%s: completion queue ending get:0x%08x\n",
576  __func__,
577  ihost->completion_queue_get);
578 
579 }
580 
581 static void sci_controller_error_handler(struct isci_host *ihost)
582 {
583  u32 interrupt_status;
584 
585  interrupt_status =
586  readl(&ihost->smu_registers->interrupt_status);
587 
588  if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
589  sci_controller_completion_queue_has_entries(ihost)) {
590 
591  sci_controller_process_completions(ihost);
592  writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
593  } else {
594  dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
595  interrupt_status);
596 
597  sci_change_state(&ihost->sm, SCIC_FAILED);
598 
599  return;
600  }
601 
602  /* If we dont process any completions I am not sure that we want to do this.
603  * We are in the middle of a hardware fault and should probably be reset.
604  */
605  writel(0, &ihost->smu_registers->interrupt_mask);
606 }
607 
608 irqreturn_t isci_intx_isr(int vec, void *data)
609 {
610  irqreturn_t ret = IRQ_NONE;
611  struct isci_host *ihost = data;
612 
613  if (sci_controller_isr(ihost)) {
614  writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
615  tasklet_schedule(&ihost->completion_tasklet);
616  ret = IRQ_HANDLED;
617  } else if (sci_controller_error_isr(ihost)) {
618  spin_lock(&ihost->scic_lock);
619  sci_controller_error_handler(ihost);
620  spin_unlock(&ihost->scic_lock);
621  ret = IRQ_HANDLED;
622  }
623 
624  return ret;
625 }
626 
627 irqreturn_t isci_error_isr(int vec, void *data)
628 {
629  struct isci_host *ihost = data;
630 
631  if (sci_controller_error_isr(ihost))
632  sci_controller_error_handler(ihost);
633 
634  return IRQ_HANDLED;
635 }
636 
645 static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
646 {
647  if (completion_status != SCI_SUCCESS)
648  dev_info(&ihost->pdev->dev,
649  "controller start timed out, continuing...\n");
650  clear_bit(IHOST_START_PENDING, &ihost->flags);
651  wake_up(&ihost->eventq);
652 }
653 
654 int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
655 {
656  struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
657  struct isci_host *ihost = ha->lldd_ha;
658 
659  if (test_bit(IHOST_START_PENDING, &ihost->flags))
660  return 0;
661 
662  sas_drain_work(ha);
663 
664  return 1;
665 }
666 
679 static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
680 {
681  /* Validate the user supplied parameters. */
682  if (!ihost)
683  return 0;
684 
685  /*
686  * The suggested minimum timeout value for a controller start operation:
687  *
688  * Signature FIS Timeout
689  * + Phy Start Timeout
690  * + Number of Phy Spin Up Intervals
691  * ---------------------------------
692  * Number of milliseconds for the controller start operation.
693  *
694  * NOTE: The number of phy spin up intervals will be equivalent
695  * to the number of phys divided by the number phys allowed
696  * per interval - 1 (once OEM parameters are supported).
697  * Currently we assume only 1 phy per interval. */
698 
699  return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
700  + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
701  + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
702 }
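As a worked example of the formula above: with SCI_MAX_PHYS == 4 and the constants defined in this file, the phy spin-up term contributes (4 - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL = 1500 ms on top of the 100 ms SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT and the signature-FIS timeout, which is defined in host.h rather than in this file.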
703 
704 static void sci_controller_enable_interrupts(struct isci_host *ihost)
705 {
706  set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
707  writel(0, &ihost->smu_registers->interrupt_mask);
708 }
709 
710 static void sci_controller_disable_interrupts(struct isci_host *ihost)
711 {
712  clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
713  writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
714  readl(&ihost->smu_registers->interrupt_mask); /* flush */
715 }
716 
717 static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
718 {
719  u32 port_task_scheduler_value;
720 
721  port_task_scheduler_value =
722  readl(&ihost->scu_registers->peg0.ptsg.control);
723  port_task_scheduler_value |=
724  (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
725  SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
726  writel(port_task_scheduler_value,
727  &ihost->scu_registers->peg0.ptsg.control);
728 }
729 
730 static void sci_controller_assign_task_entries(struct isci_host *ihost)
731 {
732  u32 task_assignment;
733 
734  /*
735  * Assign all the TCs to function 0
736  * TODO: Do we actually need to read this register to write it back?
737  */
738 
739  task_assignment =
740  readl(&ihost->smu_registers->task_context_assignment[0]);
741 
742  task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
743  (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
744  (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
745 
746  writel(task_assignment,
747  &ihost->smu_registers->task_context_assignment[0]);
748 
749 }
750 
751 static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
752 {
753  u32 index;
754  u32 completion_queue_control_value;
755  u32 completion_queue_get_value;
756  u32 completion_queue_put_value;
757 
758  ihost->completion_queue_get = 0;
759 
760  completion_queue_control_value =
761  (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
762  SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
763 
764  writel(completion_queue_control_value,
765  &ihost->smu_registers->completion_queue_control);
766 
767 
768  /* Set the completion queue get pointer and enable the queue */
769  completion_queue_get_value = (
770  (SMU_CQGR_GEN_VAL(POINTER, 0))
771  | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
772  | (SMU_CQGR_GEN_BIT(ENABLE))
773  | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
774  );
775 
776  writel(completion_queue_get_value,
777  &ihost->smu_registers->completion_queue_get);
778 
779  /* Set the completion queue put pointer */
780  completion_queue_put_value = (
781  (SMU_CQPR_GEN_VAL(POINTER, 0))
782  | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
783  );
784 
785  writel(completion_queue_put_value,
786  &ihost->smu_registers->completion_queue_put);
787 
788  /* Initialize the cycle bit of the completion queue entries */
789  for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
790  /*
791  * If get.cycle_bit != completion_queue.cycle_bit
792  * its not a valid completion queue entry
793  * so at system start all entries are invalid */
794  ihost->completion_queue[index] = 0x80000000;
795  }
796 }
797 
798 static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
799 {
800  u32 frame_queue_control_value;
801  u32 frame_queue_get_value;
802  u32 frame_queue_put_value;
803 
804  /* Write the queue size */
805  frame_queue_control_value =
806  SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
807 
808  writel(frame_queue_control_value,
809  &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
810 
811  /* Setup the get pointer for the unsolicited frame queue */
812  frame_queue_get_value = (
813  SCU_UFQGP_GEN_VAL(POINTER, 0)
814  | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
815  );
816 
817  writel(frame_queue_get_value,
818  &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
819  /* Setup the put pointer for the unsolicited frame queue */
820  frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
821  writel(frame_queue_put_value,
822  &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
823 }
824 
825 static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
826 {
827  if (ihost->sm.current_state_id == SCIC_STARTING) {
828  /*
829  * We move into the ready state, because some of the phys/ports
830  * may be up and operational.
831  */
832  sci_change_state(&ihost->sm, SCIC_READY);
833 
834  isci_host_start_complete(ihost, status);
835  }
836 }
837 
838 static bool is_phy_starting(struct isci_phy *iphy)
839 {
840  enum sci_phy_states state;
841 
842  state = iphy->sm.current_state_id;
843  switch (state) {
844  case SCI_PHY_STARTING:
845  case SCI_PHY_SUB_INITIAL:
846  case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
847  case SCI_PHY_SUB_AWAIT_IAF_UF:
848  case SCI_PHY_SUB_AWAIT_SAS_POWER:
849  case SCI_PHY_SUB_AWAIT_SATA_POWER:
850  case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
851  case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
852  case SCI_PHY_SUB_AWAIT_OSSP_EN:
853  case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
854  case SCI_PHY_SUB_FINAL:
855  return true;
856  default:
857  return false;
858  }
859 }
860 
861 static bool is_controller_start_complete(struct isci_host *ihost)
862 {
863  int i;
864 
865  for (i = 0; i < SCI_MAX_PHYS; i++) {
866  struct isci_phy *iphy = &ihost->phys[i];
867  u32 state = iphy->sm.current_state_id;
868 
869  /* in apc mode we need to check every phy, in
870  * mpc mode we only need to check phys that have
871  * been configured into a port
872  */
873  if (is_port_config_apc(ihost))
874  /* pass */;
875  else if (!phy_get_non_dummy_port(iphy))
876  continue;
877 
878  /* The controller start operation is complete iff:
879  * - all links have been given an opportunity to start
880  * - have no indication of a connected device
881  * - have an indication of a connected device and it has
882  * finished the link training process.
883  */
884  if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
885  (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
886  (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
887  (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
888  return false;
889  }
890 
891  return true;
892 }
893 
902 static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
903 {
904  struct sci_oem_params *oem = &ihost->oem_parameters;
905  struct isci_phy *iphy;
906  enum sci_status status;
907 
908  status = SCI_SUCCESS;
909 
910  if (ihost->phy_startup_timer_pending)
911  return status;
912 
913  if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
914  if (is_controller_start_complete(ihost)) {
915  sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
916  sci_del_timer(&ihost->phy_timer);
917  ihost->phy_startup_timer_pending = false;
918  }
919  } else {
920  iphy = &ihost->phys[ihost->next_phy_to_start];
921 
922  if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
923  if (phy_get_non_dummy_port(iphy) == NULL) {
924  ihost->next_phy_to_start++;
925 
926  /* Caution recursion ahead be forewarned
927  *
928  * The PHY was never added to a PORT in MPC mode
929  * so start the next phy in sequence This phy
930  * will never go link up and will not draw power
931  * the OEM parameters either configured the phy
932  * incorrectly for the PORT or it was never
933  * assigned to a PORT
934  */
935  return sci_controller_start_next_phy(ihost);
936  }
937  }
938 
939  status = sci_phy_start(iphy);
940 
941  if (status == SCI_SUCCESS) {
942  sci_mod_timer(&ihost->phy_timer,
943  SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
944  ihost->phy_startup_timer_pending = true;
945  } else {
946  dev_warn(&ihost->pdev->dev,
947  "%s: Controller stop operation failed "
948  "to stop phy %d because of status "
949  "%d.\n",
950  __func__,
951  ihost->phys[ihost->next_phy_to_start].phy_index,
952  status);
953  }
954 
955  ihost->next_phy_to_start++;
956  }
957 
958  return status;
959 }
960 
961 static void phy_startup_timeout(unsigned long data)
962 {
963  struct sci_timer *tmr = (struct sci_timer *)data;
964  struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
965  unsigned long flags;
966  enum sci_status status;
967 
968  spin_lock_irqsave(&ihost->scic_lock, flags);
969 
970  if (tmr->cancel)
971  goto done;
972 
973  ihost->phy_startup_timer_pending = false;
974 
975  do {
976  status = sci_controller_start_next_phy(ihost);
977  } while (status != SCI_SUCCESS);
978 
979 done:
980  spin_unlock_irqrestore(&ihost->scic_lock, flags);
981 }
982 
983 static u16 isci_tci_active(struct isci_host *ihost)
984 {
985  return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
986 }
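isci_tci_active() treats tci_head/tci_tail as a circular-buffer index pair (linux/circ_buf.h, included at the top of this file), so its return value is the number of task-context indices currently handed out. The complementary free/alloc helpers live in host.h; the sketch below is only illustrative of that accounting and assumes the pool size SCI_MAX_IO_REQUESTS is a power of two:

    /* Illustrative sketch, not the host.h implementation verbatim. */
    static inline void example_tci_free(struct isci_host *ihost, u16 tci)
    {
            u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS - 1);

            ihost->tci_pool[tail] = tci; /* return the tag to the ring */
            ihost->tci_tail = tail + 1;
    }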
987 
988 static enum sci_status sci_controller_start(struct isci_host *ihost,
989  u32 timeout)
990 {
991  enum sci_status result;
992  u16 index;
993 
994  if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
995  dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
996  __func__, ihost->sm.current_state_id);
998  }
999 
1000  /* Build the TCi free pool */
1001  BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
1002  ihost->tci_head = 0;
1003  ihost->tci_tail = 0;
1004  for (index = 0; index < ihost->task_context_entries; index++)
1005  isci_tci_free(ihost, index);
1006 
1007  /* Build the RNi free pool */
1008  sci_remote_node_table_initialize(&ihost->available_remote_nodes,
1009  ihost->remote_node_entries);
1010 
1011  /*
1012  * Before anything else lets make sure we will not be
1013  * interrupted by the hardware.
1014  */
1015  sci_controller_disable_interrupts(ihost);
1016 
1017  /* Enable the port task scheduler */
1018  sci_controller_enable_port_task_scheduler(ihost);
1019 
1020  /* Assign all the task entries to ihost physical function */
1021  sci_controller_assign_task_entries(ihost);
1022 
1023  /* Now initialize the completion queue */
1024  sci_controller_initialize_completion_queue(ihost);
1025 
1026  /* Initialize the unsolicited frame queue for use */
1027  sci_controller_initialize_unsolicited_frame_queue(ihost);
1028 
1029  /* Start all of the ports on this controller */
1030  for (index = 0; index < ihost->logical_port_entries; index++) {
1031  struct isci_port *iport = &ihost->ports[index];
1032 
1033  result = sci_port_start(iport);
1034  if (result)
1035  return result;
1036  }
1037 
1038  sci_controller_start_next_phy(ihost);
1039 
1040  sci_mod_timer(&ihost->timer, timeout);
1041 
1042  sci_change_state(&ihost->sm, SCIC_STARTING);
1043 
1044  return SCI_SUCCESS;
1045 }
1046 
1047 void isci_host_start(struct Scsi_Host *shost)
1048 {
1049  struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1050  unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
1051 
1052  set_bit(IHOST_START_PENDING, &ihost->flags);
1053 
1054  spin_lock_irq(&ihost->scic_lock);
1055  sci_controller_start(ihost, tmo);
1056  sci_controller_enable_interrupts(ihost);
1057  spin_unlock_irq(&ihost->scic_lock);
1058 }
1059 
1060 static void isci_host_stop_complete(struct isci_host *ihost)
1061 {
1062  sci_controller_disable_interrupts(ihost);
1063  clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1064  wake_up(&ihost->eventq);
1065 }
1066 
1067 static void sci_controller_completion_handler(struct isci_host *ihost)
1068 {
1069  /* Empty out the completion queue */
1070  if (sci_controller_completion_queue_has_entries(ihost))
1071  sci_controller_process_completions(ihost);
1072 
1073  /* Clear the interrupt and enable all interrupts again */
1074  writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
1075  /* Could we write the value of SMU_ISR_COMPLETION? */
1076  writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
1077  writel(0, &ihost->smu_registers->interrupt_mask);
1078 }
1079 
1080 void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
1081 {
1082  if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
1083  !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1084  if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
1085  /* Normal notification (task_done) */
1086  dev_dbg(&ihost->pdev->dev,
1087  "%s: Normal - ireq/task = %p/%p\n",
1088  __func__, ireq, task);
1089  task->lldd_task = NULL;
1090  task->task_done(task);
1091  } else {
1092  dev_dbg(&ihost->pdev->dev,
1093  "%s: Error - ireq/task = %p/%p\n",
1094  __func__, ireq, task);
1095  if (sas_protocol_ata(task->task_proto))
1096  task->lldd_task = NULL;
1097  sas_task_abort(task);
1098  }
1099  } else
1100  task->lldd_task = NULL;
1101 
1102  if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
1103  wake_up_all(&ihost->eventq);
1104 
1105  if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
1106  isci_free_tag(ihost, ireq->io_tag);
1107 }
1116 void isci_host_completion_routine(unsigned long data)
1117 {
1118  struct isci_host *ihost = (struct isci_host *)data;
1119  u16 active;
1120 
1121  spin_lock_irq(&ihost->scic_lock);
1122  sci_controller_completion_handler(ihost);
1123  spin_unlock_irq(&ihost->scic_lock);
1124 
1125  /*
1126  * we subtract SCI_MAX_PORTS to account for the number of dummy TCs
1127  * issued for hardware issue workaround
1128  */
1129  active = isci_tci_active(ihost) - SCI_MAX_PORTS;
1130 
1131  /*
1132  * the coalesence timeout doubles at each encoding step, so
1133  * update it based on the ilog2 value of the outstanding requests
1134  */
1135  writel(SMU_ICC_GEN_VAL(NUMBER, active) |
1136  SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
1137  &ihost->smu_registers->interrupt_coalesce_control);
1138 }
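The ilog2()-based update works because each step of the timer encoding doubles the coalescing window (see the table in sci_controller_set_interrupt_coalescence() below), so this write scales the window with queue depth: for example, with 32 requests still active the timer field becomes ISCI_COALESCE_BASE + ilog2(32) = ISCI_COALESCE_BASE + 5, where ISCI_COALESCE_BASE is defined in host.h.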
1139 
1158 static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1159 {
1160  if (ihost->sm.current_state_id != SCIC_READY) {
1161  dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1162  __func__, ihost->sm.current_state_id);
1163  return SCI_FAILURE_INVALID_STATE;
1164  }
1165 
1166  sci_mod_timer(&ihost->timer, timeout);
1167  sci_change_state(&ihost->sm, SCIC_STOPPING);
1168  return SCI_SUCCESS;
1169 }
1170 
1183 static enum sci_status sci_controller_reset(struct isci_host *ihost)
1184 {
1185  switch (ihost->sm.current_state_id) {
1186  case SCIC_RESET:
1187  case SCIC_READY:
1188  case SCIC_STOPPING:
1189  case SCIC_FAILED:
1190  /*
1191  * The reset operation is not a graceful cleanup, just
1192  * perform the state transition.
1193  */
1194  sci_change_state(&ihost->sm, SCIC_RESETTING);
1195  return SCI_SUCCESS;
1196  default:
1197  dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1198  __func__, ihost->sm.current_state_id);
1199  return SCI_FAILURE_INVALID_STATE;
1200  }
1201 }
1202 
1203 static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1204 {
1205  u32 index;
1206  enum sci_status status;
1207  enum sci_status phy_status;
1208 
1209  status = SCI_SUCCESS;
1210 
1211  for (index = 0; index < SCI_MAX_PHYS; index++) {
1212  phy_status = sci_phy_stop(&ihost->phys[index]);
1213 
1214  if (phy_status != SCI_SUCCESS &&
1215  phy_status != SCI_FAILURE_INVALID_STATE) {
1216  status = SCI_FAILURE;
1217 
1218  dev_warn(&ihost->pdev->dev,
1219  "%s: Controller stop operation failed to stop "
1220  "phy %d because of status %d.\n",
1221  __func__,
1222  ihost->phys[index].phy_index, phy_status);
1223  }
1224  }
1225 
1226  return status;
1227 }
1228 
1229 
1241 void isci_host_deinit(struct isci_host *ihost)
1242 {
1243  int i;
1244 
1245  /* disable output data selects */
1246  for (i = 0; i < isci_gpio_count(ihost); i++)
1247  writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
1248 
1249  set_bit(IHOST_STOP_PENDING, &ihost->flags);
1250 
1251  spin_lock_irq(&ihost->scic_lock);
1252  sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
1253  spin_unlock_irq(&ihost->scic_lock);
1254 
1255  wait_for_stop(ihost);
1256 
1257  /* phy stop is after controller stop to allow port and device to
1258  * go idle before shutting down the phys, but the expectation is
1259  * that i/o has been shut off well before we reach this
1260  * function.
1261  */
1262  sci_controller_stop_phys(ihost);
1263 
1264  /* disable sgpio: where the above wait should give time for the
1265  * enclosure to sample the gpios going inactive
1266  */
1267  writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
1268 
1269  spin_lock_irq(&ihost->scic_lock);
1270  sci_controller_reset(ihost);
1271  spin_unlock_irq(&ihost->scic_lock);
1272 
1273  /* Cancel any/all outstanding port timers */
1274  for (i = 0; i < ihost->logical_port_entries; i++) {
1275  struct isci_port *iport = &ihost->ports[i];
1276  del_timer_sync(&iport->timer.timer);
1277  }
1278 
1279  /* Cancel any/all outstanding phy timers */
1280  for (i = 0; i < SCI_MAX_PHYS; i++) {
1281  struct isci_phy *iphy = &ihost->phys[i];
1282  del_timer_sync(&iphy->sata_timer.timer);
1283  }
1284 
1285  del_timer_sync(&ihost->port_agent.timer.timer);
1286 
1287  del_timer_sync(&ihost->power_control.timer.timer);
1288 
1289  del_timer_sync(&ihost->timer.timer);
1290 
1291  del_timer_sync(&ihost->phy_timer.timer);
1292 }
1293 
1294 static void __iomem *scu_base(struct isci_host *isci_host)
1295 {
1296  struct pci_dev *pdev = isci_host->pdev;
1297  int id = isci_host->id;
1298 
1299  return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
1300 }
1301 
1302 static void __iomem *smu_base(struct isci_host *isci_host)
1303 {
1304  struct pci_dev *pdev = isci_host->pdev;
1305  int id = isci_host->id;
1306 
1307  return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1308 }
1309 
1310 static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
1311 {
1312  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1313 
1314  sci_change_state(&ihost->sm, SCIC_RESET);
1315 }
1316 
1317 static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
1318 {
1319  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1320 
1321  sci_del_timer(&ihost->timer);
1322 }
1323 
1324 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
1325 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
1326 #define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
1327 #define INTERRUPT_COALESCE_NUMBER_MAX 256
1328 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
1329 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
1330 
1348 static enum sci_status
1349 sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
1350  u32 coalesce_number,
1351  u32 coalesce_timeout)
1352 {
1353  u8 timeout_encode = 0;
1354  u32 min = 0;
1355  u32 max = 0;
1356 
1357  /* Check if the input parameters fall in the range. */
1358  if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
1359  return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1360 
1361  /*
1362  * Defined encoding for interrupt coalescing timeout:
1363  * Value Min Max Units
1364  * ----- --- --- -----
1365  * 0 - - Disabled
1366  * 1 13.3 20.0 ns
1367  * 2 26.7 40.0
1368  * 3 53.3 80.0
1369  * 4 106.7 160.0
1370  * 5 213.3 320.0
1371  * 6 426.7 640.0
1372  * 7 853.3 1280.0
1373  * 8 1.7 2.6 us
1374  * 9 3.4 5.1
1375  * 10 6.8 10.2
1376  * 11 13.7 20.5
1377  * 12 27.3 41.0
1378  * 13 54.6 81.9
1379  * 14 109.2 163.8
1380  * 15 218.5 327.7
1381  * 16 436.9 655.4
1382  * 17 873.8 1310.7
1383  * 18 1.7 2.6 ms
1384  * 19 3.5 5.2
1385  * 20 7.0 10.5
1386  * 21 14.0 21.0
1387  * 22 28.0 41.9
1388  * 23 55.9 83.9
1389  * 24 111.8 167.8
1390  * 25 223.7 335.5
1391  * 26 447.4 671.1
1392  * 27 894.8 1342.2
1393  * 28 1.8 2.7 s
1394  * Others Undefined */
1395 
1396  /*
1397  * Use the table above to decide the encode of interrupt coalescing timeout
1398  * value for register writing. */
1399  if (coalesce_timeout == 0)
1400  timeout_encode = 0;
1401  else{
1402  /* make the timeout value in unit of (10 ns). */
1403  coalesce_timeout = coalesce_timeout * 100;
1404  min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
1405  max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
1406 
1407  /* get the encode of timeout for register writing. */
1408  for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
1409  timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
1410  timeout_encode++) {
1411  if (min <= coalesce_timeout && max > coalesce_timeout)
1412  break;
1413  else if (coalesce_timeout >= max && coalesce_timeout < min * 2
1414  && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
1415  if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
1416  break;
1417  else{
1418  timeout_encode++;
1419  break;
1420  }
1421  } else {
1422  max = max * 2;
1423  min = min * 2;
1424  }
1425  }
1426 
1427  if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
1428  /* the value is out of range. */
1429  return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1430  }
1431 
1432  writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
1433  SMU_ICC_GEN_VAL(TIMER, timeout_encode),
1434  &ihost->smu_registers->interrupt_coalesce_control);
1435 
1436 
1437  ihost->interrupt_coalesce_number = (u16)coalesce_number;
1438  ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
1439 
1440  return SCI_SUCCESS;
1441 }
1442 
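To make the encoding loop above concrete: a requested coalesce_timeout of 10 (microseconds) becomes 1000 in 10 ns units; the search starts at encode 7 with min = 85 and max = 128 (853/10 and 1280/10) and doubles both bounds each step, so 1000 first falls inside [680, 1024) at encode 10, matching the 6.8 - 10.2 us row of the table. A typical call, e.g. to coalesce up to 32 completions for roughly 10 us, would be:

    sci_controller_set_interrupt_coalescence(ihost, 32, 10);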
1443 
1444 static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
1445 {
1446  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1447  u32 val;
1448 
1449  /* enable clock gating for power control of the scu unit */
1450  val = readl(&ihost->smu_registers->clock_gating_control);
1451  val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
1452  SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
1453  SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
1454  val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
1455  writel(val, &ihost->smu_registers->clock_gating_control);
1456 
1457  /* set the default interrupt coalescence number and timeout value. */
1458  sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1459 }
1460 
1461 static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
1462 {
1463  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1464 
1465  /* disable interrupt coalescence. */
1466  sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1467 }
1468 
1469 static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1470 {
1471  u32 index;
1472  enum sci_status port_status;
1473  enum sci_status status = SCI_SUCCESS;
1474 
1475  for (index = 0; index < ihost->logical_port_entries; index++) {
1476  struct isci_port *iport = &ihost->ports[index];
1477 
1478  port_status = sci_port_stop(iport);
1479 
1480  if ((port_status != SCI_SUCCESS) &&
1481  (port_status != SCI_FAILURE_INVALID_STATE)) {
1482  status = SCI_FAILURE;
1483 
1484  dev_warn(&ihost->pdev->dev,
1485  "%s: Controller stop operation failed to "
1486  "stop port %d because of status %d.\n",
1487  __func__,
1488  iport->logical_port_index,
1489  port_status);
1490  }
1491  }
1492 
1493  return status;
1494 }
1495 
1496 static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
1497 {
1498  u32 index;
1499  enum sci_status status;
1500  enum sci_status device_status;
1501 
1502  status = SCI_SUCCESS;
1503 
1504  for (index = 0; index < ihost->remote_node_entries; index++) {
1505  if (ihost->device_table[index] != NULL) {
1506  /* / @todo What timeout value do we want to provide to this request? */
1507  device_status = sci_remote_device_stop(ihost->device_table[index], 0);
1508 
1509  if ((device_status != SCI_SUCCESS) &&
1510  (device_status != SCI_FAILURE_INVALID_STATE)) {
1511  dev_warn(&ihost->pdev->dev,
1512  "%s: Controller stop operation failed "
1513  "to stop device 0x%p because of "
1514  "status %d.\n",
1515  __func__,
1516  ihost->device_table[index], device_status);
1517  }
1518  }
1519  }
1520 
1521  return status;
1522 }
1523 
1524 static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
1525 {
1526  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1527 
1528  sci_controller_stop_devices(ihost);
1529  sci_controller_stop_ports(ihost);
1530 
1531  if (!sci_controller_has_remote_devices_stopping(ihost))
1532  isci_host_stop_complete(ihost);
1533 }
1534 
1535 static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
1536 {
1537  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1538 
1539  sci_del_timer(&ihost->timer);
1540 }
1541 
1542 static void sci_controller_reset_hardware(struct isci_host *ihost)
1543 {
1544  /* Disable interrupts so we dont take any spurious interrupts */
1545  sci_controller_disable_interrupts(ihost);
1546 
1547  /* Reset the SCU */
1548  writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
1549 
1550  /* Delay for 1ms to before clearing the CQP and UFQPR. */
1551  udelay(1000);
1552 
1553  /* The write to the CQGR clears the CQP */
1554  writel(0x00000000, &ihost->smu_registers->completion_queue_get);
1555 
1556  /* The write to the UFQGP clears the UFQPR */
1557  writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
1558 
1559  /* clear all interrupts */
1560  writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
1561 }
1562 
1563 static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
1564 {
1565  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1566 
1567  sci_controller_reset_hardware(ihost);
1568  sci_change_state(&ihost->sm, SCIC_RESET);
1569 }
1570 
1571 static const struct sci_base_state sci_controller_state_table[] = {
1572  [SCIC_INITIAL] = {
1573  .enter_state = sci_controller_initial_state_enter,
1574  },
1575  [SCIC_RESET] = {},
1576  [SCIC_INITIALIZING] = {},
1577  [SCIC_INITIALIZED] = {},
1578  [SCIC_STARTING] = {
1579  .exit_state = sci_controller_starting_state_exit,
1580  },
1581  [SCIC_READY] = {
1582  .enter_state = sci_controller_ready_state_enter,
1583  .exit_state = sci_controller_ready_state_exit,
1584  },
1585  [SCIC_RESETTING] = {
1586  .enter_state = sci_controller_resetting_state_enter,
1587  },
1588  [SCIC_STOPPING] = {
1589  .enter_state = sci_controller_stopping_state_enter,
1590  .exit_state = sci_controller_stopping_state_exit,
1591  },
1592  [SCIC_FAILED] = {}
1593 };
1594 
1595 static void controller_timeout(unsigned long data)
1596 {
1597  struct sci_timer *tmr = (struct sci_timer *)data;
1598  struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
1599  struct sci_base_state_machine *sm = &ihost->sm;
1600  unsigned long flags;
1601 
1602  spin_lock_irqsave(&ihost->scic_lock, flags);
1603 
1604  if (tmr->cancel)
1605  goto done;
1606 
1607  if (sm->current_state_id == SCIC_STARTING)
1608  sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1609  else if (sm->current_state_id == SCIC_STOPPING) {
1610  sci_change_state(sm, SCIC_FAILED);
1611  isci_host_stop_complete(ihost);
1612  } else /* / @todo Now what do we want to do in this case? */
1613  dev_err(&ihost->pdev->dev,
1614  "%s: Controller timer fired when controller was not "
1615  "in a state being timed.\n",
1616  __func__);
1617 
1618 done:
1619  spin_unlock_irqrestore(&ihost->scic_lock, flags);
1620 }
1621 
1622 static enum sci_status sci_controller_construct(struct isci_host *ihost,
1623  void __iomem *scu_base,
1624  void __iomem *smu_base)
1625 {
1626  u8 i;
1627 
1628  sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
1629 
1630  ihost->scu_registers = scu_base;
1631  ihost->smu_registers = smu_base;
1632 
1633  sci_port_configuration_agent_construct(&ihost->port_agent);
1634 
1635  /* Construct the ports for this controller */
1636  for (i = 0; i < SCI_MAX_PORTS; i++)
1637  sci_port_construct(&ihost->ports[i], i, ihost);
1638  sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
1639 
1640  /* Construct the phys for this controller */
1641  for (i = 0; i < SCI_MAX_PHYS; i++) {
1642  /* Add all the PHYs to the dummy port */
1643  sci_phy_construct(&ihost->phys[i],
1644  &ihost->ports[SCI_MAX_PORTS], i);
1645  }
1646 
1647  ihost->invalid_phy_mask = 0;
1648 
1649  sci_init_timer(&ihost->timer, controller_timeout);
1650 
1651  return sci_controller_reset(ihost);
1652 }
1653 
1654 int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1655 {
1656  int i;
1657 
1658  for (i = 0; i < SCI_MAX_PORTS; i++)
1659  if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
1660  return -EINVAL;
1661 
1662  for (i = 0; i < SCI_MAX_PHYS; i++)
1663  if (oem->phys[i].sas_address.high == 0 &&
1664  oem->phys[i].sas_address.low == 0)
1665  return -EINVAL;
1666 
1667  if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
1668  for (i = 0; i < SCI_MAX_PHYS; i++)
1669  if (oem->ports[i].phy_mask != 0)
1670  return -EINVAL;
1671  } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
1672  u8 phy_mask = 0;
1673 
1674  for (i = 0; i < SCI_MAX_PHYS; i++)
1675  phy_mask |= oem->ports[i].phy_mask;
1676 
1677  if (phy_mask == 0)
1678  return -EINVAL;
1679  } else
1680  return -EINVAL;
1681 
1682  if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
1683  oem->controller.max_concurr_spin_up < 1)
1684  return -EINVAL;
1685 
1686  if (oem->controller.do_enable_ssc) {
1687  if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
1688  return -EINVAL;
1689 
1690  if (version >= ISCI_ROM_VER_1_1) {
1691  u8 test = oem->controller.ssc_sata_tx_spread_level;
1692 
1693  switch (test) {
1694  case 0:
1695  case 2:
1696  case 3:
1697  case 6:
1698  case 7:
1699  break;
1700  default:
1701  return -EINVAL;
1702  }
1703 
1704  test = oem->controller.ssc_sas_tx_spread_level;
1705  if (oem->controller.ssc_sas_tx_type == 0) {
1706  switch (test) {
1707  case 0:
1708  case 2:
1709  case 3:
1710  break;
1711  default:
1712  return -EINVAL;
1713  }
1714  } else if (oem->controller.ssc_sas_tx_type == 1) {
1715  switch (test) {
1716  case 0:
1717  case 3:
1718  case 6:
1719  break;
1720  default:
1721  return -EINVAL;
1722  }
1723  }
1724  }
1725  }
1726 
1727  return 0;
1728 }
1729 
1730 static u8 max_spin_up(struct isci_host *ihost)
1731 {
1732  if (ihost->user_parameters.max_concurr_spinup)
1733  return min_t(u8, ihost->user_parameters.max_concurr_spinup,
1734  MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1735  else
1736  return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
1737  MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1738 }
1739 
1740 static void power_control_timeout(unsigned long data)
1741 {
1742  struct sci_timer *tmr = (struct sci_timer *)data;
1743  struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
1744  struct isci_phy *iphy;
1745  unsigned long flags;
1746  u8 i;
1747 
1748  spin_lock_irqsave(&ihost->scic_lock, flags);
1749 
1750  if (tmr->cancel)
1751  goto done;
1752 
1753  ihost->power_control.phys_granted_power = 0;
1754 
1755  if (ihost->power_control.phys_waiting == 0) {
1756  ihost->power_control.timer_started = false;
1757  goto done;
1758  }
1759 
1760  for (i = 0; i < SCI_MAX_PHYS; i++) {
1761 
1762  if (ihost->power_control.phys_waiting == 0)
1763  break;
1764 
1765  iphy = ihost->power_control.requesters[i];
1766  if (iphy == NULL)
1767  continue;
1768 
1769  if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
1770  break;
1771 
1772  ihost->power_control.requesters[i] = NULL;
1773  ihost->power_control.phys_waiting--;
1774  ihost->power_control.phys_granted_power++;
1775  sci_phy_consume_power_handler(iphy);
1776 
1777  if (iphy->protocol == SAS_PROTOCOL_SSP) {
1778  u8 j;
1779 
1780  for (j = 0; j < SCI_MAX_PHYS; j++) {
1781  struct isci_phy *requester = ihost->power_control.requesters[j];
1782 
1783  /*
1784  * Search the power_control queue to see if there are other phys
1785  * attached to the same remote device. If found, take all of
1786  * them out of await_sas_power state.
1787  */
1788  if (requester != NULL && requester != iphy) {
1789  u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
1790  iphy->frame_rcvd.iaf.sas_addr,
1791  sizeof(requester->frame_rcvd.iaf.sas_addr));
1792 
1793  if (other == 0) {
1794  ihost->power_control.requesters[j] = NULL;
1795  ihost->power_control.phys_waiting--;
1796  sci_phy_consume_power_handler(requester);
1797  }
1798  }
1799  }
1800  }
1801  }
1802 
1803  /*
1804  * It doesn't matter if the power list is empty, we need to start the
1805  * timer in case another phy becomes ready.
1806  */
1807  sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1808  ihost->power_control.timer_started = true;
1809 
1810 done:
1811  spin_unlock_irqrestore(&ihost->scic_lock, flags);
1812 }
1813 
1814 void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1815  struct isci_phy *iphy)
1816 {
1817  BUG_ON(iphy == NULL);
1818 
1819  if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
1820  ihost->power_control.phys_granted_power++;
1821  sci_phy_consume_power_handler(iphy);
1822 
1823  /*
1824  * stop and start the power_control timer. When the timer fires, the
1825  * no_of_phys_granted_power will be set to 0
1826  */
1827  if (ihost->power_control.timer_started)
1828  sci_del_timer(&ihost->power_control.timer);
1829 
1830  sci_mod_timer(&ihost->power_control.timer,
1831  SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1832  ihost->power_control.timer_started = true;
1833 
1834  } else {
1835  /*
1836  * If another phy attached to the same SAS address as this phy is
1837  * already in the READY state, this phy does not need to wait.
1838  */
1839  u8 i;
1840  struct isci_phy *current_phy;
1841 
1842  for (i = 0; i < SCI_MAX_PHYS; i++) {
1843  u8 other;
1844  current_phy = &ihost->phys[i];
1845 
1846  other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
1847  iphy->frame_rcvd.iaf.sas_addr,
1848  sizeof(current_phy->frame_rcvd.iaf.sas_addr));
1849 
1850  if (current_phy->sm.current_state_id == SCI_PHY_READY &&
1851  current_phy->protocol == SAS_PROTOCOL_SSP &&
1852  other == 0) {
1853  sci_phy_consume_power_handler(iphy);
1854  break;
1855  }
1856  }
1857 
1858  if (i == SCI_MAX_PHYS) {
1859  /* Add the phy in the waiting list */
1860  ihost->power_control.requesters[iphy->phy_index] = iphy;
1861  ihost->power_control.phys_waiting++;
1862  }
1863  }
1864 }
1865 
1866 void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1867  struct isci_phy *iphy)
1868 {
1869  BUG_ON(iphy == NULL);
1870 
1871  if (ihost->power_control.requesters[iphy->phy_index])
1872  ihost->power_control.phys_waiting--;
1873 
1874  ihost->power_control.requesters[iphy->phy_index] = NULL;
1875 }
1876 
1877 static int is_long_cable(int phy, unsigned char selection_byte)
1878 {
1879  return !!(selection_byte & (1 << phy));
1880 }
1881 
1882 static int is_medium_cable(int phy, unsigned char selection_byte)
1883 {
1884  return !!(selection_byte & (1 << (phy + 4)));
1885 }
1886 
1887 static enum cable_selections decode_selection_byte(
1888  int phy,
1889  unsigned char selection_byte)
1890 {
1891  return ((selection_byte & (1 << phy)) ? 1 : 0)
1892  + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
1893 }
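The two-bit value returned above indexes the cable_selections enum consumed by lookup_cable_names() below: bit phy selects the long-cable setting, bit phy+4 the medium-cable setting, and both bits set is the "undefined, assumed long" case. In tabular form:

    /*
     *  long bit  medium bit  decoded value  meaning
     *     0          0             0        short_cable
     *     1          0             1        long_cable
     *     0          1             2        medium_cable
     *     1          1             3        undefined_cable (treated as long)
     */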
1894 
1895 static unsigned char *to_cable_select(struct isci_host *ihost)
1896 {
1897  if (is_cable_select_overridden())
1898  return ((unsigned char *)&cable_selection_override)
1899  + ihost->id;
1900  else
1901  return &ihost->oem_parameters.controller.cable_selection_mask;
1902 }
1903 
1904 enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
1905 {
1906  return decode_selection_byte(phy, *to_cable_select(ihost));
1907 }
1908 
1909 char *lookup_cable_names(enum cable_selections selection)
1910 {
1911  static char *cable_names[] = {
1912  [short_cable] = "short",
1913  [long_cable] = "long",
1914  [medium_cable] = "medium",
1915  [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
1916  };
1917  return (selection <= undefined_cable) ? cable_names[selection]
1918  : cable_names[undefined_cable];
1919 }
1920 
1921 #define AFE_REGISTER_WRITE_DELAY 10
1922 
1923 static void sci_controller_afe_initialization(struct isci_host *ihost)
1924 {
1925  struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
1926  const struct sci_oem_params *oem = &ihost->oem_parameters;
1927  struct pci_dev *pdev = ihost->pdev;
1928  u32 afe_status;
1929  u32 phy_id;
1930  unsigned char cable_selection_mask = *to_cable_select(ihost);
1931 
1932  /* Clear DFX Status registers */
1933  writel(0x0081000f, &afe->afe_dfx_master_control0);
1935 
1936  if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
1937  /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
1938  * Timer, PM Stagger Timer
1939  */
1940  writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
1942  }
1943 
1944  /* Configure bias currents to normal */
1945  if (is_a2(pdev))
1946  writel(0x00005A00, &afe->afe_bias_control);
1947  else if (is_b0(pdev) || is_c0(pdev))
1948  writel(0x00005F00, &afe->afe_bias_control);
1949  else if (is_c1(pdev))
1950  writel(0x00005500, &afe->afe_bias_control);
1951 
1953 
1954  /* Enable PLL */
1955  if (is_a2(pdev))
1956  writel(0x80040908, &afe->afe_pll_control0);
1957  else if (is_b0(pdev) || is_c0(pdev))
1958  writel(0x80040A08, &afe->afe_pll_control0);
1959  else if (is_c1(pdev)) {
1960  writel(0x80000B08, &afe->afe_pll_control0);
1962  writel(0x00000B08, &afe->afe_pll_control0);
1964  writel(0x80000B08, &afe->afe_pll_control0);
1965  }
1966 
1968 
1969  /* Wait for the PLL to lock */
1970  do {
1971  afe_status = readl(&afe->afe_common_block_status);
1973  } while ((afe_status & 0x00001000) == 0);
1974 
1975  if (is_a2(pdev)) {
1976  /* Shorten SAS SNW lock time (RxLock timer value from 76
1977  * us to 50 us)
1978  */
1979  writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
1981  }
1982 
1983  for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
1984  struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id];
1985  const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
1986  int cable_length_long =
1987  is_long_cable(phy_id, cable_selection_mask);
1988  int cable_length_medium =
1989  is_medium_cable(phy_id, cable_selection_mask);
1990 
1991  if (is_a2(pdev)) {
1992  /* All defaults, except the Receive Word
1993  * Alignment/Comma Detect Enable....(0xe800)
1994  */
1995  writel(0x00004512, &xcvr->afe_xcvr_control0);
1997 
1998  writel(0x0050100F, &xcvr->afe_xcvr_control1);
2000  } else if (is_b0(pdev)) {
2001  /* Configure transmitter SSC parameters */
2002  writel(0x00030000, &xcvr->afe_tx_ssc_control);
2004  } else if (is_c0(pdev)) {
2005  /* Configure transmitter SSC parameters */
2006  writel(0x00010202, &xcvr->afe_tx_ssc_control);
2008 
2009  /* All defaults, except the Receive Word
2010  * Alignment/Comma Detect Enable....(0xe800)
2011  */
2012  writel(0x00014500, &xcvr->afe_xcvr_control0);
2014  } else if (is_c1(pdev)) {
2015  /* Configure transmitter SSC parameters */
2016  writel(0x00010202, &xcvr->afe_tx_ssc_control);
2018 
2019  /* All defaults, except the Receive Word
2020  * Alignment/Comma Detect Enable....(0xe800)
2021  */
2022  writel(0x0001C500, &xcvr->afe_xcvr_control0);
2024  }
2025 
2026  /* Power up TX and RX out from power down (PWRDNTX and
2027  * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
2028  */
2029  if (is_a2(pdev))
2030  writel(0x000003F0, &xcvr->afe_channel_control);
2031  else if (is_b0(pdev)) {
2032  writel(0x000003D7, &xcvr->afe_channel_control);
2034 
2035  writel(0x000003D4, &xcvr->afe_channel_control);
2036  } else if (is_c0(pdev)) {
2037  writel(0x000001E7, &xcvr->afe_channel_control);
2039 
2040  writel(0x000001E4, &xcvr->afe_channel_control);
2041  } else if (is_c1(pdev)) {
2042  writel(cable_length_long ? 0x000002F7 : 0x000001F7,
2043  &xcvr->afe_channel_control);
2045 
2046  writel(cable_length_long ? 0x000002F4 : 0x000001F4,
2047  &xcvr->afe_channel_control);
2048  }
2050 
2051  if (is_a2(pdev)) {
2052  /* Enable TX equalization (0xe824) */
2053  writel(0x00040000, &xcvr->afe_tx_control);
2055  }
2056 
2057  if (is_a2(pdev) || is_b0(pdev))
2058  /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
2059  * TPD=0x0(TX Power On), RDD=0x0(RX Detect
2060  * Enabled) ....(0xe800)
2061  */
2062  writel(0x00004100, &xcvr->afe_xcvr_control0);
2063  else if (is_c0(pdev))
2064  writel(0x00014100, &xcvr->afe_xcvr_control0);
2065  else if (is_c1(pdev))
2066  writel(0x0001C100, &xcvr->afe_xcvr_control0);
2068 
2069  /* Leave DFE/FFE on */
2070  if (is_a2(pdev))
2071  writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2072  else if (is_b0(pdev)) {
2073  writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2074  udelay(AFE_REGISTER_WRITE_DELAY);
2075  /* Enable TX equalization (0xe824) */
2076  writel(0x00040000, &xcvr->afe_tx_control);
2077  } else if (is_c0(pdev)) {
2078  writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
2079  udelay(AFE_REGISTER_WRITE_DELAY);
2080 
2081  writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
2082  udelay(AFE_REGISTER_WRITE_DELAY);
2083 
2084  /* Enable TX equalization (0xe824) */
2085  writel(0x00040000, &xcvr->afe_tx_control);
2086  } else if (is_c1(pdev)) {
2087  writel(cable_length_long ? 0x01500C0C :
2088  cable_length_medium ? 0x01400C0D : 0x02400C0D,
2089  &xcvr->afe_xcvr_control1);
2090  udelay(AFE_REGISTER_WRITE_DELAY);
2091 
2092  writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
2093  udelay(AFE_REGISTER_WRITE_DELAY);
2094 
2095  writel(cable_length_long ? 0x33091C1F :
2096  cable_length_medium ? 0x3315181F : 0x2B17161F,
2097  &xcvr->afe_rx_ssc_control0);
2098  udelay(AFE_REGISTER_WRITE_DELAY);
2099 
2100  /* Enable TX equalization (0xe824) */
2101  writel(0x00040000, &xcvr->afe_tx_control);
2102  }
2103 
2104  udelay(AFE_REGISTER_WRITE_DELAY);
2105 
2106  writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
2107  udelay(AFE_REGISTER_WRITE_DELAY);
2108 
2109  writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
2110  udelay(AFE_REGISTER_WRITE_DELAY);
2111 
2112  writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
2113  udelay(AFE_REGISTER_WRITE_DELAY);
2114 
2115  writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
2116  udelay(AFE_REGISTER_WRITE_DELAY);
2117  }
2118 
2119  /* Transfer control to the PEs */
2120  writel(0x00010f00, &afe->afe_dfx_master_control0);
2121  udelay(AFE_REGISTER_WRITE_DELAY);
2122 }
2123 
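/* The power control bookkeeping below is used to stagger phy power-up:
 * phys_waiting and phys_granted_power track which phys are queued for or
 * have received power, and the power_control_timeout timer paces how
 * quickly grants are handed out.
 */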
2124 static void sci_controller_initialize_power_control(struct isci_host *ihost)
2125 {
2126  sci_init_timer(&ihost->power_control.timer, power_control_timeout);
2127 
2128  memset(ihost->power_control.requesters, 0,
2129  sizeof(ihost->power_control.requesters));
2130 
2131  ihost->power_control.phys_waiting = 0;
2132  ihost->power_control.phys_granted_power = 0;
2133 }
2134 
2135 static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2136 {
2137  struct sci_base_state_machine *sm = &ihost->sm;
2138  enum sci_status result = SCI_FAILURE;
2139  unsigned long i, state, val;
2140 
2141  if (ihost->sm.current_state_id != SCIC_RESET) {
2142  dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2143  __func__, ihost->sm.current_state_id);
2144  return SCI_FAILURE_INVALID_STATE;
2145  }
2146 
2147  sci_change_state(sm, SCIC_INITIALIZING);
2148 
2149  sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
2150 
2151  ihost->next_phy_to_start = 0;
2152  ihost->phy_startup_timer_pending = false;
2153 
2154  sci_controller_initialize_power_control(ihost);
2155 
2156  /*
2157  * There is nothing to do here for B0 since we do not have to
2158  * program the AFE registers.
2159  * @todo The AFE settings are supposed to be correct for the B0 but
2160  * presently they seem to be wrong. */
2161  sci_controller_afe_initialization(ihost);
2162 
2163 
2164  /* Take the hardware out of reset */
2165  writel(0, &ihost->smu_registers->soft_reset_control);
2166 
2167  /*
2168  * @todo Provide a meaningful error code for hardware failure
2169  * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2170  for (i = 100; i >= 1; i--) {
2171  u32 status;
2172 
2173  /* Loop until the hardware reports success */
2174  udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2175  status = readl(&ihost->smu_registers->control_status);
2176 
2177  if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
2178  break;
2179  }
2180  if (i == 0)
2181  goto out;
2182 
2183  /*
2184  * Determine the actual device capacities that the
2185  * hardware will support */
2186  val = readl(&ihost->smu_registers->device_context_capacity);
2187 
2188  /* Record the smaller of the two capacity values */
2189  ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
2190  ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
2191  ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
2192 
2193  /*
2194  * Make all PEs that are unassigned match up with the
2195  * logical ports
2196  */
2197  for (i = 0; i < ihost->logical_port_entries; i++) {
2198  struct scu_port_task_scheduler_group_registers __iomem
2199  *ptsg = &ihost->scu_registers->peg0.ptsg;
2200 
2201  writel(i, &ptsg->protocol_engine[i]);
2202  }
2203 
2204  /* Initialize hardware PCI Relaxed ordering in DMA engines */
2205  val = readl(&ihost->scu_registers->sdma.pdma_configuration);
2206  val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2207  writel(val, &ihost->scu_registers->sdma.pdma_configuration);
2208 
2209  val = readl(&ihost->scu_registers->sdma.cdma_configuration);
2210  val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2211  writel(val, &ihost->scu_registers->sdma.cdma_configuration);
2212 
2213  /*
2214  * Initialize the PHYs before the PORTs because the PHY registers
2215  * are accessed during the port initialization.
2216  */
2217  for (i = 0; i < SCI_MAX_PHYS; i++) {
2218  result = sci_phy_initialize(&ihost->phys[i],
2219  &ihost->scu_registers->peg0.pe[i].tl,
2220  &ihost->scu_registers->peg0.pe[i].ll);
2221  if (result != SCI_SUCCESS)
2222  goto out;
2223  }
2224 
2225  for (i = 0; i < ihost->logical_port_entries; i++) {
2226  struct isci_port *iport = &ihost->ports[i];
2227 
2228  iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
2229  iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
2230  iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
2231  }
2232 
2233  result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
2234 
2235  out:
2236  /* Advance the controller state machine */
2237  if (result == SCI_SUCCESS)
2238  state = SCIC_INITIALIZED;
2239  else
2240  state = SCIC_FAILED;
2241  sci_change_state(sm, state);
2242 
2243  return result;
2244 }
2245 
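/* All buffers below come from dmam_alloc_coherent(), so they are managed
 * allocations that the driver core frees automatically when the PCI device
 * goes away; the early return on a non-NULL completion_queue makes this
 * routine safe to call again when the controller is re-initialized.
 */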
2246 static int sci_controller_dma_alloc(struct isci_host *ihost)
2247 {
2248  struct device *dev = &ihost->pdev->dev;
2249  size_t size;
2250  int i;
2251 
2252  /* detect re-initialization */
2253  if (ihost->completion_queue)
2254  return 0;
2255 
2256  size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
2257  ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
2258  GFP_KERNEL);
2259  if (!ihost->completion_queue)
2260  return -ENOMEM;
2261 
2262  size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2263  ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
2264  GFP_KERNEL);
2265 
2266  if (!ihost->remote_node_context_table)
2267  return -ENOMEM;
2268 
2269  size = ihost->task_context_entries * sizeof(struct scu_task_context);
2270  ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
2271  GFP_KERNEL);
2272  if (!ihost->task_context_table)
2273  return -ENOMEM;
2274 
2275  size = SCI_UFI_TOTAL_SIZE;
2276  ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
2277  if (!ihost->ufi_buf)
2278  return -ENOMEM;
2279 
2280  for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2281  struct isci_request *ireq;
2282  dma_addr_t dma;
2283 
2284  ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
2285  if (!ireq)
2286  return -ENOMEM;
2287 
2288  ireq->tc = &ihost->task_context_table[i];
2289  ireq->owning_controller = ihost;
2290  ireq->request_daddr = dma;
2291  ireq->isci_host = ihost;
2292  ihost->reqs[i] = ireq;
2293  }
2294 
2295  return 0;
2296 }
2297 
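/* Hand the DMA addresses obtained above to the silicon: the completion
 * queue, remote node context table and task context table bases are each
 * programmed as a lower/upper 32-bit register pair.
 */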
2298 static int sci_controller_mem_init(struct isci_host *ihost)
2299 {
2300  int err = sci_controller_dma_alloc(ihost);
2301 
2302  if (err)
2303  return err;
2304 
2305  writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
2306  writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
2307 
2308  writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
2309  writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
2310 
2311  writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
2312  writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
2313 
2314  sci_unsolicited_frame_control_construct(ihost);
2315 
2316  /*
2317  * Inform the silicon as to the location of the UF headers and
2318  * address table.
2319  */
2320  writel(lower_32_bits(ihost->uf_control.headers.physical_address),
2321  &ihost->scu_registers->sdma.uf_header_base_address_lower);
2322  writel(upper_32_bits(ihost->uf_control.headers.physical_address),
2323  &ihost->scu_registers->sdma.uf_header_base_address_upper);
2324 
2325  writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
2326  &ihost->scu_registers->sdma.uf_address_table_lower);
2327  writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
2328  &ihost->scu_registers->sdma.uf_address_table_upper);
2329 
2330  return 0;
2331 }
2332 
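/* isci_host_init - bring the host up from scratch: construct and initialize
 * the controller under scic_lock, allocate and publish the DMA-backed
 * tables, then enable the SGPIO unit in hardware-controlled mode.
 */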
2341 int isci_host_init(struct isci_host *ihost)
2342 {
2343  int i, err;
2344  enum sci_status status;
2345 
2346  spin_lock_irq(&ihost->scic_lock);
2347  status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
2348  spin_unlock_irq(&ihost->scic_lock);
2349  if (status != SCI_SUCCESS) {
2350  dev_err(&ihost->pdev->dev,
2351  "%s: sci_controller_construct failed - status = %x\n",
2352  __func__,
2353  status);
2354  return -ENODEV;
2355  }
2356 
2357  spin_lock_irq(&ihost->scic_lock);
2358  status = sci_controller_initialize(ihost);
2359  spin_unlock_irq(&ihost->scic_lock);
2360  if (status != SCI_SUCCESS) {
2361  dev_warn(&ihost->pdev->dev,
2362  "%s: sci_controller_initialize failed -"
2363  " status = 0x%x\n",
2364  __func__, status);
2365  return -ENODEV;
2366  }
2367 
2368  err = sci_controller_mem_init(ihost);
2369  if (err)
2370  return err;
2371 
2372  /* enable sgpio */
2373  writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
2374  for (i = 0; i < isci_gpio_count(ihost); i++)
2375  writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
2376  writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
2377 
2378  return 0;
2379 }
2380 
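/* Phy link-up/link-down events are routed to the port configuration agent.
 * While the controller is still STARTING, a link-up also cancels the phy
 * startup timer and triggers bring-up of the next phy in sequence.
 */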
2381 void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
2382  struct isci_phy *iphy)
2383 {
2384  switch (ihost->sm.current_state_id) {
2385  case SCIC_STARTING:
2386  sci_del_timer(&ihost->phy_timer);
2387  ihost->phy_startup_timer_pending = false;
2388  ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2389  iport, iphy);
2390  sci_controller_start_next_phy(ihost);
2391  break;
2392  case SCIC_READY:
2393  ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2394  iport, iphy);
2395  break;
2396  default:
2397  dev_dbg(&ihost->pdev->dev,
2398  "%s: SCIC Controller linkup event from phy %d in "
2399  "unexpected state %d\n", __func__, iphy->phy_index,
2400  ihost->sm.current_state_id);
2401  }
2402 }
2403 
2404 void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2405  struct isci_phy *iphy)
2406 {
2407  switch (ihost->sm.current_state_id) {
2408  case SCIC_STARTING:
2409  case SCIC_READY:
2410  ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
2411  iport, iphy);
2412  break;
2413  default:
2414  dev_dbg(&ihost->pdev->dev,
2415  "%s: SCIC Controller linkdown event from phy %d in "
2416  "unexpected state %d\n",
2417  __func__,
2418  iphy->phy_index,
2419  ihost->sm.current_state_id);
2420  }
2421 }
2422 
2423 static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2424 {
2425  u32 index;
2426 
2427  for (index = 0; index < ihost->remote_node_entries; index++) {
2428  if ((ihost->device_table[index] != NULL) &&
2429  (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
2430  return true;
2431  }
2432 
2433  return false;
2434 }
2435 
2436 void sci_controller_remote_device_stopped(struct isci_host *ihost,
2437  struct isci_remote_device *idev)
2438 {
2439  if (ihost->sm.current_state_id != SCIC_STOPPING) {
2440  dev_dbg(&ihost->pdev->dev,
2441  "SCIC Controller 0x%p remote device stopped event "
2442  "from device 0x%p in unexpected state %d\n",
2443  ihost, idev,
2444  ihost->sm.current_state_id);
2445  return;
2446  }
2447 
2448  if (!sci_controller_has_remote_devices_stopping(ihost))
2449  isci_host_stop_complete(ihost);
2450 }
2451 
2452 void sci_controller_post_request(struct isci_host *ihost, u32 request)
2453 {
2454  dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
2455  __func__, ihost->id, request);
2456 
2457  writel(request, &ihost->smu_registers->post_context_port);
2458 }
2459 
2460 struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
2461 {
2462  u16 task_index;
2463  u16 task_sequence;
2464 
2465  task_index = ISCI_TAG_TCI(io_tag);
2466 
2467  if (task_index < ihost->task_context_entries) {
2468  struct isci_request *ireq = ihost->reqs[task_index];
2469 
2470  if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
2471  task_sequence = ISCI_TAG_SEQ(io_tag);
2472 
2473  if (task_sequence == ihost->io_request_sequence[task_index])
2474  return ireq;
2475  }
2476  }
2477 
2478  return NULL;
2479 }
2480 
2495 enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
2496  struct isci_remote_device *idev,
2497  u16 *node_id)
2498 {
2499  u16 node_index;
2500  u32 remote_node_count = sci_remote_device_node_count(idev);
2501 
2502  node_index = sci_remote_node_table_allocate_remote_node(
2503  &ihost->available_remote_nodes, remote_node_count
2504  );
2505 
2506  if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
2507  ihost->device_table[node_index] = idev;
2508 
2509  *node_id = node_index;
2510 
2511  return SCI_SUCCESS;
2512  }
2513 
2514  return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2515 }
2516 
2517 void sci_controller_free_remote_node_context(struct isci_host *ihost,
2518  struct isci_remote_device *idev,
2519  u16 node_id)
2520 {
2521  u32 remote_node_count = sci_remote_device_node_count(idev);
2522 
2523  if (ihost->device_table[node_id] == idev) {
2524  ihost->device_table[node_id] = NULL;
2525 
2526  sci_remote_node_table_release_remote_node_index(
2527  &ihost->available_remote_nodes, remote_node_count, node_id
2528  );
2529  }
2530 }
2531 
2532 void sci_controller_copy_sata_response(void *response_buffer,
2533  void *frame_header,
2534  void *frame_buffer)
2535 {
2536  /* XXX type safety? */
2537  memcpy(response_buffer, frame_header, sizeof(u32));
2538 
2539  memcpy(response_buffer + sizeof(u32),
2540  frame_buffer,
2541  sizeof(struct dev_to_host_fis) - sizeof(u32));
2542 }
2543 
2544 void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
2545 {
2546  if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
2547  writel(ihost->uf_control.get,
2548  &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
2549 }
2550 
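/* Task context indices (TCIs) are recycled through a small circular pool
 * (tci_pool indexed by tci_head/tci_tail).  An IO tag built by
 * ISCI_TAG(seq, tci) also carries a per-TCI sequence number, so stale tags
 * whose sequence no longer matches io_request_sequence[] are rejected by
 * isci_free_tag() and sci_request_by_tag().  Typical caller pattern
 * (sketch; callers are expected to hold ihost->scic_lock):
 *
 *	u16 tag = isci_alloc_tag(ihost);
 *	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
 *		return -EBUSY;
 *	...
 *	isci_free_tag(ihost, tag);
 */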
2551 void isci_tci_free(struct isci_host *ihost, u16 tci)
2552 {
2553  u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
2554 
2555  ihost->tci_pool[tail] = tci;
2556  ihost->tci_tail = tail + 1;
2557 }
2558 
2559 static u16 isci_tci_alloc(struct isci_host *ihost)
2560 {
2561  u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
2562  u16 tci = ihost->tci_pool[head];
2563 
2564  ihost->tci_head = head + 1;
2565  return tci;
2566 }
2567 
2568 static u16 isci_tci_space(struct isci_host *ihost)
2569 {
2570  return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
2571 }
2572 
2573 u16 isci_alloc_tag(struct isci_host *ihost)
2574 {
2575  if (isci_tci_space(ihost)) {
2576  u16 tci = isci_tci_alloc(ihost);
2577  u8 seq = ihost->io_request_sequence[tci];
2578 
2579  return ISCI_TAG(seq, tci);
2580  }
2581 
2582  return SCI_CONTROLLER_INVALID_IO_TAG;
2583 }
2584 
2585 enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
2586 {
2587  u16 tci = ISCI_TAG_TCI(io_tag);
2588  u16 seq = ISCI_TAG_SEQ(io_tag);
2589 
2590  /* prevent tail from passing head */
2591  if (isci_tci_active(ihost) == 0)
2592  return SCI_FAILURE_INVALID_IO_TAG;
2593 
2594  if (seq == ihost->io_request_sequence[tci]) {
2595  ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
2596 
2597  isci_tci_free(ihost, tci);
2598 
2599  return SCI_SUCCESS;
2600  }
2601  return SCI_FAILURE_INVALID_IO_TAG;
2602 }
2603 
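/* Start an IO request on a remote device.  Only legal in the READY state;
 * on success the request is flagged IREQ_ACTIVE and its task context is
 * posted to the hardware via the SMU post context port.
 */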
2604 enum sci_status sci_controller_start_io(struct isci_host *ihost,
2605  struct isci_remote_device *idev,
2606  struct isci_request *ireq)
2607 {
2608  enum sci_status status;
2609 
2610  if (ihost->sm.current_state_id != SCIC_READY) {
2611  dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2612  __func__, ihost->sm.current_state_id);
2613  return SCI_FAILURE_INVALID_STATE;
2614  }
2615 
2616  status = sci_remote_device_start_io(ihost, idev, ireq);
2617  if (status != SCI_SUCCESS)
2618  return status;
2619 
2620  set_bit(IREQ_ACTIVE, &ireq->flags);
2621  sci_controller_post_request(ihost, ireq->post_context);
2622  return SCI_SUCCESS;
2623 }
2624 
2625 enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
2626  struct isci_remote_device *idev,
2627  struct isci_request *ireq)
2628 {
2629  /* terminate an ongoing (i.e. started) core IO request. This does not
2630  * abort the IO request at the target, but rather removes the IO
2631  * request from the host controller.
2632  */
2633  enum sci_status status;
2634 
2635  if (ihost->sm.current_state_id != SCIC_READY) {
2636  dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2637  __func__, ihost->sm.current_state_id);
2638  return SCI_FAILURE_INVALID_STATE;
2639  }
2640  status = sci_io_request_terminate(ireq);
2641 
2642  dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
2643  __func__, status, ireq, ireq->flags);
2644 
2645  if ((status == SCI_SUCCESS) &&
2646  !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
2647  !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
2648  /* Utilize the original post context command and OR in the
2649  * POST_TC_ABORT request sub-type.
2650  */
2651  sci_controller_post_request(
2652  ihost, ireq->post_context |
2653  SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2654  }
2655  return status;
2656 }
2657 
2669 enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2670  struct isci_remote_device *idev,
2671  struct isci_request *ireq)
2672 {
2673  enum sci_status status;
2674  u16 index;
2675 
2676  switch (ihost->sm.current_state_id) {
2677  case SCIC_STOPPING:
2678  /* XXX: Implement this function */
2679  return SCI_FAILURE;
2680  case SCIC_READY:
2681  status = sci_remote_device_complete_io(ihost, idev, ireq);
2682  if (status != SCI_SUCCESS)
2683  return status;
2684 
2685  index = ISCI_TAG_TCI(ireq->io_tag);
2686  clear_bit(IREQ_ACTIVE, &ireq->flags);
2687  return SCI_SUCCESS;
2688  default:
2689  dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2690  __func__, ihost->sm.current_state_id);
2691  return SCI_FAILURE_INVALID_STATE;
2692  }
2693 
2694 }
2695 
2696 enum sci_status sci_controller_continue_io(struct isci_request *ireq)
2697 {
2698  struct isci_host *ihost = ireq->owning_controller;
2699 
2700  if (ihost->sm.current_state_id != SCIC_READY) {
2701  dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2702  __func__, ihost->sm.current_state_id);
2703  return SCI_FAILURE_INVALID_STATE;
2704  }
2705 
2706  set_bit(IREQ_ACTIVE, &ireq->flags);
2707  sci_controller_post_request(ihost, ireq->post_context);
2708  return SCI_SUCCESS;
2709 }
2710 
2720 enum sci_status sci_controller_start_task(struct isci_host *ihost,
2721  struct isci_remote_device *idev,
2722  struct isci_request *ireq)
2723 {
2724  enum sci_status status;
2725 
2726  if (ihost->sm.current_state_id != SCIC_READY) {
2727  dev_warn(&ihost->pdev->dev,
2728  "%s: SCIC Controller starting task from invalid "
2729  "state\n",
2730  __func__);
2731  return SCI_FAILURE_INVALID_STATE;
2732  }
2733 
2734  status = sci_remote_device_start_task(ihost, idev, ireq);
2735  switch (status) {
2736  case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
2737  set_bit(IREQ_ACTIVE, &ireq->flags);
2738 
2739  /*
2740  * We will let the framework know this task request started
2741  * successfully, although the core is still working on starting the
2742  * request (it will post the TC when the RNC is resumed).
2743  */
2744  return SCI_SUCCESS;
2745  case SCI_SUCCESS:
2746  set_bit(IREQ_ACTIVE, &ireq->flags);
2747  sci_controller_post_request(ihost, ireq->post_context);
2748  break;
2749  default:
2750  break;
2751  }
2752 
2753  return status;
2754 }
2755 
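/* SGPIO TX_GP writes: each output_data_select register carries three OD
 * fields and 0x444 leaves every ODx.n cleared; for each bit requested
 * through the libsas GPIO interface the loop below clears the matching
 * 'invert' bit so that the corresponding output is driven.
 */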
2756 static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
2757 {
2758  int d;
2759 
2760  /* no support for TX_GP_CFG */
2761  if (reg_index == 0)
2762  return -EINVAL;
2763 
2764  for (d = 0; d < isci_gpio_count(ihost); d++) {
2765  u32 val = 0x444; /* all ODx.n clear */
2766  int i;
2767 
2768  for (i = 0; i < 3; i++) {
2769  int bit = (i << 2) + 2;
2770 
2771  bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
2772  write_data, reg_index,
2773  reg_count);
2774  if (bit < 0)
2775  break;
2776 
2777  /* if od is set, clear the 'invert' bit */
2778  val &= ~(bit << ((i << 2) + 2));
2779  }
2780 
2781  if (i < 3)
2782  break;
2783  writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
2784  }
2785 
2786  /* unless reg_index is > 1, we should always be able to write at
2787  * least one register
2788  */
2789  return d > 0;
2790 }
2791 
2792 int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
2793  u8 reg_count, u8 *write_data)
2794 {
2795  struct isci_host *ihost = sas_ha->lldd_ha;
2796  int written;
2797 
2798  switch (reg_type) {
2799  case SAS_GPIO_REG_TX_GP:
2800  written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
2801  break;
2802  default:
2803  written = -EINVAL;
2804  }
2805 
2806  return written;
2807 }