Linux Kernel  3.7.1
ufshcd.c
1 /*
2  * Universal Flash Storage Host controller driver
3  *
4  * This code is based on drivers/scsi/ufs/ufshcd.c
5  * Copyright (C) 2011-2012 Samsung India Software Operations
6  *
7  * Santosh Yaraganavi <[email protected]>
8  * Vinayak Holikatti <[email protected]>
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version 2
13  * of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18  * GNU General Public License for more details.
19  *
20  * NO WARRANTY
21  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25  * solely responsible for determining the appropriateness of using and
26  * distributing the Program and assumes all risks associated with its
27  * exercise of rights under this Agreement, including but not limited to
28  * the risks and costs of program errors, damage to or loss of data,
29  * programs or equipment, and unavailability or interruption of operations.
30 
31  * DISCLAIMER OF LIABILITY
32  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 
40  * You should have received a copy of the GNU General Public License
41  * along with this program; if not, write to the Free Software
42  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
43  * USA.
44  */
45 
46 #include <linux/module.h>
47 #include <linux/kernel.h>
48 #include <linux/init.h>
49 #include <linux/pci.h>
50 #include <linux/interrupt.h>
51 #include <linux/io.h>
52 #include <linux/delay.h>
53 #include <linux/slab.h>
54 #include <linux/spinlock.h>
55 #include <linux/workqueue.h>
56 #include <linux/errno.h>
57 #include <linux/types.h>
58 #include <linux/wait.h>
59 #include <linux/bitops.h>
60 
61 #include <asm/irq.h>
62 #include <asm/byteorder.h>
63 #include <scsi/scsi.h>
64 #include <scsi/scsi_cmnd.h>
65 #include <scsi/scsi_host.h>
66 #include <scsi/scsi_tcq.h>
67 #include <scsi/scsi_dbg.h>
68 #include <scsi/scsi_eh.h>
69 
70 #include "ufs.h"
71 #include "ufshci.h"
72 
73 #define UFSHCD "ufshcd"
74 #define UFSHCD_DRIVER_VERSION "0.1"
75 
76 enum {
77  UFSHCD_MAX_CHANNEL = 0,
78  UFSHCD_MAX_ID = 1,
79  UFSHCD_MAX_LUNS = 8,
80  UFSHCD_CMD_PER_LUN = 32,
81  UFSHCD_CAN_QUEUE = 32,
82 };
83 
84 /* UFSHCD states */
85 enum {
86  UFSHCD_STATE_OPERATIONAL,
87  UFSHCD_STATE_RESET,
88  UFSHCD_STATE_ERROR,
89 };
90 
91 /* Interrupt configuration options */
92 enum {
93  UFSHCD_INT_DISABLE,
94  UFSHCD_INT_ENABLE,
95  UFSHCD_INT_CLEAR,
96 };
97 
98 /* Interrupt aggregation options */
99 enum {
100  INT_AGGR_RESET,
101  INT_AGGR_CONFIG,
102 };
103 
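104 /**
105  * struct uic_command - UIC command structure
106  * @command: UIC command
107  * @argument1: UIC command argument 1
108  * @argument2: UIC command argument 2
109  * @argument3: UIC command argument 3
110  * @cmd_active: Indicate if UIC command is outstanding
111  * @result: UIC command result
112  */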
113 struct uic_command {
114  u32 command;
115  u32 argument1;
116  u32 argument2;
117  u32 argument3;
118  int cmd_active;
119  int result;
120 };
121 
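122 /**
123  * struct ufs_hba - per adapter private structure
124  * @mmio_base: UFSHCI base register address
125  * @ucdl_base_addr: UFS Command Descriptor base address
126  * @utrdl_base_addr: UTP Transfer Request Descriptor base address
127  * @utmrdl_base_addr: UTP Task Management Descriptor base address
128  * @ucdl_dma_addr: UFS Command Descriptor DMA address
129  * @utrdl_dma_addr: UTRDL DMA address
130  * @utmrdl_dma_addr: UTMRDL DMA address
131  * @host: Scsi_Host instance of the driver
132  * @pdev: PCI device handle
133  * @lrb: local reference block
134  * @outstanding_tasks: Bits representing outstanding task requests
135  * @outstanding_reqs: Bits representing outstanding transfer requests
136  * @capabilities: UFS Controller Capabilities
137  * @nutrs: Transfer Request Queue depth supported by controller
138  * @nutmrs: Task Management Queue depth supported by controller
139  * @active_uic_cmd: handle of active UIC command
140  * @ufshcd_tm_wait_queue: wait queue for task management
141  * @tm_condition: condition variable for task management
142  * @ufshcd_state: UFSHCD states
143  * @int_enable_mask: Interrupt Mask Bits
144  * @uic_workq: Work queue for UIC completion handling
145  * @feh_workq: Work queue for fatal controller error handling
146  * @errors: HBA errors
147  */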
148 struct ufs_hba {
149  void __iomem *mmio_base;
150 
151  /* Virtual memory reference */
152  struct utp_transfer_cmd_desc *ucdl_base_addr;
153  struct utp_transfer_req_desc *utrdl_base_addr;
154  struct utp_task_req_desc *utmrdl_base_addr;
155 
156  /* DMA memory reference */
157  dma_addr_t ucdl_dma_addr;
158  dma_addr_t utrdl_dma_addr;
159  dma_addr_t utmrdl_dma_addr;
160 
161  struct Scsi_Host *host;
162  struct pci_dev *pdev;
163 
164  struct ufshcd_lrb *lrb;
165 
166  unsigned long outstanding_tasks;
167  unsigned long outstanding_reqs;
168 
169  u32 capabilities;
170  int nutrs;
171  int nutmrs;
172  u32 ufs_version;
173 
174  struct uic_command active_uic_cmd;
175  wait_queue_head_t ufshcd_tm_wait_queue;
176  unsigned long tm_condition;
177 
178  u32 ufshcd_state;
179  u32 int_enable_mask;
180 
181  /* Work Queues */
182  struct work_struct uic_workq;
183  struct work_struct feh_workq;
184 
185  /* HBA Errors */
186  u32 errors;
187 };
188 
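189 /**
190  * struct ufshcd_lrb - local reference block
191  * @utr_descriptor_ptr: UTRD address of the command
192  * @ucd_cmd_ptr: UCD address of the command
193  * @ucd_rsp_ptr: Response UPIU address for this command
194  * @ucd_prdt_ptr: PRDT address of the command
195  * @cmd: pointer to SCSI command
196  * @sense_buffer: pointer to sense buffer address of the SCSI command
197  * @sense_bufflen: Length of the sense buffer
198  * @scsi_status: SCSI status of the command
199  * @command_type: SCSI, UFS, Query.
200  * @task_tag: Task tag of the command
201  * @lun: LUN of the command
202  */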
203 struct ufshcd_lrb {
204  struct utp_transfer_req_desc *utr_descriptor_ptr;
205  struct utp_upiu_cmd *ucd_cmd_ptr;
206  struct utp_upiu_rsp *ucd_rsp_ptr;
207  struct ufshcd_sg_entry *ucd_prdt_ptr;
208 
209  struct scsi_cmnd *cmd;
210  u8 *sense_buffer;
211  unsigned int sense_bufflen;
212  int scsi_status;
213 
214  int command_type;
215  int task_tag;
216  unsigned int lun;
217 };
218 
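219 /**
220  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
221  * @hba: Pointer to adapter instance
222  *
223  * Returns UFSHCI version supported by the controller
224  */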
225 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
226 {
227  return readl(hba->mmio_base + REG_UFS_VERSION);
228 }
229 
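230 /**
231  * ufshcd_is_device_present - Check if any device connected to
232  * the host controller
233  * @reg_hcs: host controller status register value
234  *
235  * Returns 1 if device present, 0 if no device detected
236  */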
237 static inline int ufshcd_is_device_present(u32 reg_hcs)
238 {
239  return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
240 }
241 
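242 /**
243  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
244  * @lrbp: pointer to local command reference block
245  *
246  * This function is used to get the OCS field from UTRD
247  * Returns the OCS field in the UTRD
248  */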
249 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
250 {
251  return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
252 }
253 
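254 /**
255  * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
256  * @task_req_descp: pointer to utp_task_req_desc structure
257  *
258  * This function is used to get the OCS field from UTMRD
259  * Returns the OCS field in the UTMRD
260  */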
261 static inline int
262 ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
263 {
264  return task_req_descp->header.dword_2 & MASK_OCS;
265 }
266 
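267 /**
268  * ufshcd_get_tm_free_slot - get a free slot for task management request
269  * @hba: per adapter instance
270  *
271  * Returns the maximum number of task management slots if the queue is
272  * full, or the first free slot number otherwise
273  */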
274 static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
275 {
276  return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
277 }
278 
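279 /**
280  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
281  * @hba: per adapter instance
282  * @pos: position of the bit to be cleared
283  */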
284 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
285 {
286  writel(~(1 << pos),
287  (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_CLEAR));
288 }
289 
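290 /**
291  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
292  * @reg: Register value of host controller status
293  * For example, reg = 0x0F (DP, UTRLRDY, UTMRLRDY and UCRDY set) yields 0
294  * Returns zero if the ready bits are set, non-zero value otherwise
295  */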
296 static inline int ufshcd_get_lists_status(u32 reg)
297 {
298  /*
299  * The mask 0xFF is for the following HCS register bits
300  * Bit Description
301  * 0 Device Present
302  * 1 UTRLRDY
303  * 2 UTMRLRDY
304  * 3 UCRDY
305  * 4 HEI
306  * 5 DEI
307  * 6-7 reserved
308  */
309  return (((reg) & (0xFF)) >> 1) ^ (0x07);
310 }
311 
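312 /**
313  * ufshcd_get_uic_cmd_result - Get the UIC command result
314  * @hba: Pointer to adapter instance
315  *
316  * This function gets the result of UIC command completion
317  * Returns 0 on success, non zero value on error
318  */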
319 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
320 {
321  return readl(hba->mmio_base + REG_UIC_COMMAND_ARG_2) &
322  MASK_UIC_COMMAND_RESULT;
323 }
324 
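325 /**
326  * ufshcd_free_hba_memory - Free allocated memory for LRB, request
327  * and task lists
328  * @hba: Pointer to adapter instance
329  */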
330 static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
331 {
332  size_t utmrdl_size, utrdl_size, ucdl_size;
333 
334  kfree(hba->lrb);
335 
336  if (hba->utmrdl_base_addr) {
337  utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
338  dma_free_coherent(&hba->pdev->dev, utmrdl_size,
339  hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
340  }
341 
342  if (hba->utrdl_base_addr) {
343  utrdl_size =
344  (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
345  dma_free_coherent(&hba->pdev->dev, utrdl_size,
346  hba->utrdl_base_addr, hba->utrdl_dma_addr);
347  }
348 
349  if (hba->ucdl_base_addr) {
350  ucdl_size =
351  (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
352  dma_free_coherent(&hba->pdev->dev, ucdl_size,
353  hba->ucdl_base_addr, hba->ucdl_dma_addr);
354  }
355 }
356 
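357 /**
358  * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
359  * @ucd_rsp_ptr: pointer to response UPIU
360  *
361  * This function checks the response UPIU for valid transaction type
362  * in the response field
363  * Returns 0 on success, non-zero on failure
364  */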
365 static inline int
366 ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
367 {
368  return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
369  UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
370 }
371 
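372 /**
373  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
374  * @ucd_rsp_ptr: pointer to response UPIU
375  *
376  * This function gets the response status and scsi_status from response UPIU
377  * Returns the response result code
378  */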
379 static inline int
380 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
381 {
382  return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
383 }
384 
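385 /**
386  * ufshcd_config_int_aggr - Configure interrupt aggregation values.
387  * Currently there is no use case where we want to configure the
388  * interrupt aggregation dynamically. So to configure interrupt
389  * aggregation, only one combination is used: the counter threshold
390  * and timeout values defined by the UFSHCI header.
391  * @hba: per adapter instance
392  * @option: Interrupt aggregation option
393  */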
394 static inline void
395 ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
396 {
397  switch (option) {
398  case INT_AGGR_RESET:
399  writel((INT_AGGR_ENABLE |
400  INT_AGGR_COUNTER_AND_TIMER_RESET),
401  (hba->mmio_base +
402  REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
403  break;
404  case INT_AGGR_CONFIG:
405  writel((INT_AGGR_ENABLE |
406  INT_AGGR_PARAM_WRITE |
407  INT_AGGR_COUNTER_THRESHOLD_VALUE |
408  INT_AGGR_TIMEOUT_VALUE),
409  (hba->mmio_base +
410  REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
411  break;
412  }
413 }
414 
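415 /**
416  * ufshcd_enable_run_stop_reg - Enable run-stop registers.
417  * When run-stop registers are set to 1, it indicates to the
418  * host controller that it can process the requests
419  * @hba: per adapter instance
420  */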
421 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
422 {
423  writel(UTP_TASK_REQ_LIST_RUN_STOP_BIT,
424  (hba->mmio_base +
425  REG_UTP_TASK_REQ_LIST_RUN_STOP));
426  writel(UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
427  (hba->mmio_base +
428  REG_UTP_TRANSFER_REQ_LIST_RUN_STOP));
429 }
430 
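431 /**
432  * ufshcd_hba_stop - Send controller to reset state
433  * @hba: per adapter instance
434  */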
435 static inline void ufshcd_hba_stop(struct ufs_hba *hba)
436 {
437  writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
438 }
439 
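440 /**
441  * ufshcd_hba_start - Start controller initialization sequence
442  * @hba: per adapter instance
443  */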
444 static inline void ufshcd_hba_start(struct ufs_hba *hba)
445 {
446  writel(CONTROLLER_ENABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
447 }
448 
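449 /**
450  * ufshcd_is_hba_active - Get controller state
451  * @hba: per adapter instance
452  * For example, HCE bit = 1 (enabled) returns 0; HCE bit = 0 returns 1
453  * Returns zero if controller is active, 1 otherwise
454  */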
455 static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
456 {
457  return (readl(hba->mmio_base + REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
458 }
459 
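460 /**
461  * ufshcd_send_command - Send SCSI or device management commands
462  * @hba: per adapter instance
463  * @task_tag: Task tag of the command
464  */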
465 static inline
466 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
467 {
468  __set_bit(task_tag, &hba->outstanding_reqs);
469  writel((1 << task_tag),
470  (hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL));
471 }
472 
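473 /**
474  * ufshcd_copy_sense_data - Copy sense data in case of check condition
475  * @lrbp: pointer to local reference block
476  */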
477 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
478 {
479  int len;
480  if (lrbp->sense_buffer) {
481  len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
482  memcpy(lrbp->sense_buffer,
483  lrbp->ucd_rsp_ptr->sense_data,
484  min_t(int, len, SCSI_SENSE_BUFFERSIZE));
485  }
486 }
487 
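488 /**
489  * ufshcd_hba_capabilities - Read controller capabilities
490  * @hba: per adapter instance
491  */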
492 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
493 {
494  hba->capabilities =
495  readl(hba->mmio_base + REG_CONTROLLER_CAPABILITIES);
496 
497  /* nutrs and nutmrs are 0 based values */
498  hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
499  hba->nutmrs =
500  ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
501 }
502 
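503 /**
504  * ufshcd_send_uic_command - Send UIC commands to unipro layers
505  * @hba: per adapter instance
506  * @uic_cmnd: UIC command
507  */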
508 static inline void
509 ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd)
510 {
511  /* Write Args */
512  writel(uic_cmnd->argument1,
513  (hba->mmio_base + REG_UIC_COMMAND_ARG_1));
514  writel(uic_cmnd->argument2,
515  (hba->mmio_base + REG_UIC_COMMAND_ARG_2));
516  writel(uic_cmnd->argument3,
517  (hba->mmio_base + REG_UIC_COMMAND_ARG_3));
518 
519  /* Write UIC Cmd */
520  writel((uic_cmnd->command & COMMAND_OPCODE_MASK),
521  (hba->mmio_base + REG_UIC_COMMAND));
522 }
523 
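524 /**
525  * ufshcd_map_sg - Map scatter-gather list to prdt
526  * @lrbp: pointer to local reference block
527  *
528  * Returns 0 in case of success, non-zero value in case of failure
529  */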
530 static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
531 {
532  struct ufshcd_sg_entry *prd_table;
533  struct scatterlist *sg;
534  struct scsi_cmnd *cmd;
535  int sg_segments;
536  int i;
537 
538  cmd = lrbp->cmd;
539  sg_segments = scsi_dma_map(cmd);
540  if (sg_segments < 0)
541  return sg_segments;
542 
543  if (sg_segments) {
544  lrbp->utr_descriptor_ptr->prd_table_length =
545  cpu_to_le16((u16) (sg_segments));
546 
547  prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
548 
549  scsi_for_each_sg(cmd, sg, sg_segments, i) {
550  prd_table[i].size =
551  cpu_to_le32(((u32) sg_dma_len(sg))-1);
552  prd_table[i].base_addr =
554  prd_table[i].upper_addr =
556  }
557  } else {
558  lrbp->utr_descriptor_ptr->prd_table_length = 0;
559  }
560 
561  return 0;
562 }
563 
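564 /**
565  * ufshcd_int_config - enable/disable interrupts
566  * @hba: per adapter instance
567  * @option: interrupt option
568  */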
569 static void ufshcd_int_config(struct ufs_hba *hba, u32 option)
570 {
571  switch (option) {
572  case UFSHCD_INT_ENABLE:
573  writel(hba->int_enable_mask,
574  (hba->mmio_base + REG_INTERRUPT_ENABLE));
575  break;
576  case UFSHCD_INT_DISABLE:
577  if (hba->ufs_version == UFSHCI_VERSION_10)
578  writel(INTERRUPT_DISABLE_MASK_10,
579  (hba->mmio_base + REG_INTERRUPT_ENABLE));
580  else
581  writel(INTERRUPT_DISABLE_MASK_11,
582  (hba->mmio_base + REG_INTERRUPT_ENABLE));
583  break;
584  }
585 }
586 
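587 /**
588  * ufshcd_compose_upiu - form UPIU (User Protocol Information Unit)
589  * @lrbp: pointer to local reference block
590  */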
591 static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
592 {
593  struct utp_transfer_req_desc *req_desc;
594  struct utp_upiu_cmd *ucd_cmd_ptr;
595  u32 data_direction;
596  u32 upiu_flags;
597 
598  ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
599  req_desc = lrbp->utr_descriptor_ptr;
600 
601  switch (lrbp->command_type) {
602  case UTP_CMD_TYPE_SCSI:
603  if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
604  data_direction = UTP_DEVICE_TO_HOST;
605  upiu_flags = UPIU_CMD_FLAGS_READ;
606  } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
607  data_direction = UTP_HOST_TO_DEVICE;
608  upiu_flags = UPIU_CMD_FLAGS_WRITE;
609  } else {
610  data_direction = UTP_NO_DATA_TRANSFER;
611  upiu_flags = UPIU_CMD_FLAGS_NONE;
612  }
613 
614  /* Transfer request descriptor header fields */
615  req_desc->header.dword_0 =
616  cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
617 
618  /*
619  * assigning invalid value for command status. Controller
620  * updates OCS on command completion, with the command
621  * status
622  */
623  req_desc->header.dword_2 =
624  cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
625 
626  /* command descriptor fields */
627  ucd_cmd_ptr->header.dword_0 =
628  cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
629  upiu_flags,
630  lrbp->lun,
631  lrbp->task_tag));
632  ucd_cmd_ptr->header.dword_1 =
633  cpu_to_be32(
634  UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
635  0,
636  0,
637  0));
638 
639  /* Total EHS length and Data segment length will be zero */
640  ucd_cmd_ptr->header.dword_2 = 0;
641 
642  ucd_cmd_ptr->exp_data_transfer_len =
643  cpu_to_be32(lrbp->cmd->transfersize);
644 
645  memcpy(ucd_cmd_ptr->cdb,
646  lrbp->cmd->cmnd,
647  (min_t(unsigned short,
648  lrbp->cmd->cmd_len,
649  MAX_CDB_SIZE)));
650  break;
651  case UTP_CMD_TYPE_DEV_MANAGE:
652  /* For query function implementation */
653  break;
654  case UTP_CMD_TYPE_UFS:
655  /* For UFS native command implementation */
656  break;
657  } /* end of switch */
658 }
659 
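660 /**
661  * ufshcd_queuecommand - main entry point for SCSI requests
662  * @host: SCSI host pointer
663  * @cmd: command from SCSI Midlayer
664  *
665  * Returns 0 for success, non-zero in case of failure
666  */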
667 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
668 {
669  struct ufshcd_lrb *lrbp;
670  struct ufs_hba *hba;
671  unsigned long flags;
672  int tag;
673  int err = 0;
674 
675  hba = shost_priv(host);
676 
677  tag = cmd->request->tag;
678 
679  if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
680  err = SCSI_MLQUEUE_HOST_BUSY;
681  goto out;
682  }
683 
684  lrbp = &hba->lrb[tag];
685 
686  lrbp->cmd = cmd;
687  lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
688  lrbp->sense_buffer = cmd->sense_buffer;
689  lrbp->task_tag = tag;
690  lrbp->lun = cmd->device->lun;
691 
692  lrbp->command_type = UTP_CMD_TYPE_SCSI;
693 
694  /* form UPIU before issuing the command */
695  ufshcd_compose_upiu(lrbp);
696  err = ufshcd_map_sg(lrbp);
697  if (err)
698  goto out;
699 
700  /* issue command to the controller */
701  spin_lock_irqsave(hba->host->host_lock, flags);
702  ufshcd_send_command(hba, tag);
703  spin_unlock_irqrestore(hba->host->host_lock, flags);
704 out:
705  return err;
706 }
707 
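708 /**
709  * ufshcd_memory_alloc - allocate memory for host memory space data structures
710  * @hba: per adapter instance
711  *
712  * 1. Allocate DMA memory for command descriptor pool
713  * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
714  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL)
715  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
716  * (UTMRDL)
717  * 4. Allocate memory for local reference block (lrb)
718  *
719  * Returns 0 for success, non-zero in case of failure
720  */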
721 static int ufshcd_memory_alloc(struct ufs_hba *hba)
722 {
723  size_t utmrdl_size, utrdl_size, ucdl_size;
724 
725  /* Allocate memory for UTP command descriptors */
726  ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
727  hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
728  ucdl_size,
729  &hba->ucdl_dma_addr,
730  GFP_KERNEL);
731 
732  /*
733  * UFSHCI requires UTP command descriptor to be 128 byte aligned.
734  * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
735  * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
736  * be aligned to 128 bytes as well
737  */
738  if (!hba->ucdl_base_addr ||
739  WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
740  dev_err(&hba->pdev->dev,
741  "Command Descriptor Memory allocation failed\n");
742  goto out;
743  }
744 
745  /*
746  * Allocate memory for UTP Transfer descriptors
747  * UFSHCI requires 1024 byte alignment of UTRD
748  */
749  utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
750  hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
751  utrdl_size,
752  &hba->utrdl_dma_addr,
753  GFP_KERNEL);
754  if (!hba->utrdl_base_addr ||
755  WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
756  dev_err(&hba->pdev->dev,
757  "Transfer Descriptor Memory allocation failed\n");
758  goto out;
759  }
760 
761  /*
762  * Allocate memory for UTP Task Management descriptors
763  * UFSHCI requires 1024 byte alignment of UTMRD
764  */
765  utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
766  hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
767  utmrdl_size,
768  &hba->utmrdl_dma_addr,
769  GFP_KERNEL);
770  if (!hba->utmrdl_base_addr ||
771  WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
772  dev_err(&hba->pdev->dev,
773  "Task Management Descriptor Memory allocation failed\n");
774  goto out;
775  }
776 
777  /* Allocate memory for local reference block */
778  hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
779  if (!hba->lrb) {
780  dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
781  goto out;
782  }
783  return 0;
784 out:
785  ufshcd_free_hba_memory(hba);
786  return -ENOMEM;
787 }
788 
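789 /**
790  * ufshcd_host_memory_configure - configure local reference block with
791  * memory offsets
792  * @hba: per adapter instance
793  *
794  * Configure Host memory space
795  * 1. Update corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
796  * address
797  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
798  * and PRDT offset
799  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and
800  * UCD.PRDT into local reference block
801  */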
802 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
803 {
804  struct utp_transfer_cmd_desc *cmd_descp;
805  struct utp_transfer_req_desc *utrdlp;
806  dma_addr_t cmd_desc_dma_addr;
807  dma_addr_t cmd_desc_element_addr;
808  u16 response_offset;
809  u16 prdt_offset;
810  int cmd_desc_size;
811  int i;
812 
813  utrdlp = hba->utrdl_base_addr;
814  cmd_descp = hba->ucdl_base_addr;
815 
816  response_offset =
817  offsetof(struct utp_transfer_cmd_desc, response_upiu);
818  prdt_offset =
819  offsetof(struct utp_transfer_cmd_desc, prd_table);
820 
821  cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
822  cmd_desc_dma_addr = hba->ucdl_dma_addr;
823 
824  for (i = 0; i < hba->nutrs; i++) {
825  /* Configure UTRD with command descriptor base address */
826  cmd_desc_element_addr =
827  (cmd_desc_dma_addr + (cmd_desc_size * i));
828  utrdlp[i].command_desc_base_addr_lo =
829  cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
830  utrdlp[i].command_desc_base_addr_hi =
831  cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
832 
833  /* Response upiu and prdt offset should be in double words */
834  utrdlp[i].response_upiu_offset =
835  cpu_to_le16((response_offset >> 2));
836  utrdlp[i].prd_table_offset =
837  cpu_to_le16((prdt_offset >> 2));
838  utrdlp[i].response_upiu_length =
839  cpu_to_le16(ALIGNED_UPIU_SIZE);
840 
841  hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
842  hba->lrb[i].ucd_cmd_ptr =
843  (struct utp_upiu_cmd *)(cmd_descp + i);
844  hba->lrb[i].ucd_rsp_ptr =
845  (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
846  hba->lrb[i].ucd_prdt_ptr =
847  (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
848  }
849 }
850 
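851 /**
852  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
853  * @hba: per adapter instance
854  *
855  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
856  * in order to establish the link between the host controller and
857  * a UFS device (facilitate device detection). This command is as
858  * good as providing power to the UFS device.
859  *
860  * Returns 0 on success, non-zero value on failure
861  */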
862 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
863 {
864  struct uic_command *uic_cmd;
865  unsigned long flags;
866 
867  /* check if controller is ready to accept UIC commands */
868  if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
869  UIC_COMMAND_READY) == 0x0) {
870  dev_err(&hba->pdev->dev,
871  "Controller not ready"
872  " to accept UIC commands\n");
873  return -EIO;
874  }
875 
876  spin_lock_irqsave(hba->host->host_lock, flags);
877 
878  /* form UIC command */
879  uic_cmd = &hba->active_uic_cmd;
880  uic_cmd->command = UIC_CMD_DME_LINK_STARTUP;
881  uic_cmd->argument1 = 0;
882  uic_cmd->argument2 = 0;
883  uic_cmd->argument3 = 0;
884 
885  /* enable UIC related interrupts */
886  hba->int_enable_mask |= UIC_COMMAND_COMPL;
887  ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
888 
889  /* sending UIC commands to controller */
890  ufshcd_send_uic_command(hba, uic_cmd);
891  spin_unlock_irqrestore(hba->host->host_lock, flags);
892  return 0;
893 }
894 
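895 /**
896  * ufshcd_make_hba_operational - Make UFS controller operational
897  * @hba: per adapter instance
898  *
899  * To bring UFS host controller to operational state,
900  * 1. Check if device is present
901  * 2. Configure run-stop-registers
902  * 3. Enable required interrupts
903  * 4. Configure interrupt aggregation
904  *
905  * Returns 0 on success, non-zero value on failure
906  */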
907 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
908 {
909  int err = 0;
910  u32 reg;
911 
912  /* check if device present */
913  reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
914  if (!ufshcd_is_device_present(reg)) {
915  dev_err(&hba->pdev->dev, "cc: Device not present\n");
916  err = -ENXIO;
917  goto out;
918  }
919 
920  /*
921  * UCRDY, UTMRLDY and UTRLRDY bits must be 1
922  * DEI, HEI bits must be 0
923  */
924  if (!(ufshcd_get_lists_status(reg))) {
925  ufshcd_enable_run_stop_reg(hba);
926  } else {
927  dev_err(&hba->pdev->dev,
928  "Host controller not ready to process requests");
929  err = -EIO;
930  goto out;
931  }
932 
933  /* Enable required interrupts */
934  hba->int_enable_mask |= (UTP_TRANSFER_REQ_COMPL |
935  UIC_ERROR |
936  UTP_TASK_REQ_COMPL |
937  DEVICE_FATAL_ERROR |
938  CONTROLLER_FATAL_ERROR |
939  SYSTEM_BUS_FATAL_ERROR);
940  ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
941 
942  /* Configure interrupt aggregation */
943  ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
944 
945  if (hba->ufshcd_state == UFSHCD_STATE_RESET)
946  scsi_unblock_requests(hba->host);
947 
948  hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
949  scsi_scan_host(hba->host);
950 out:
951  return err;
952 }
953 
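954 /**
955  * ufshcd_hba_enable - initialize the controller
956  * @hba: per adapter instance
957  *
958  * The controller resets itself and controller firmware initialization
959  * sequence kicks off. When controller is ready it will set
960  * the Host Controller Enable bit to 1.
961  *
962  * Returns 0 on success, non-zero value on failure
963  */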
964 static int ufshcd_hba_enable(struct ufs_hba *hba)
965 {
966  int retry;
967 
968  /*
969  * msleep of 1 and 5 used in this function might result in msleep(20),
970  * but it was necessary to send the UFS FPGA to reset mode during
971  * development and testing of this driver. msleep can be changed to
972  * mdelay and retry count can be reduced based on the controller.
973  */
974  if (!ufshcd_is_hba_active(hba)) {
975 
976  /* change controller state to "reset state" */
977  ufshcd_hba_stop(hba);
978 
979  /*
980  * This delay is based on the testing done with UFS host
981  * controller FPGA. The delay can be changed based on the
982  * host controller used.
983  */
984  msleep(5);
985  }
986 
987  /* start controller initialization sequence */
988  ufshcd_hba_start(hba);
989 
990  /*
991  * To initialize a UFS host controller HCE bit must be set to 1.
992  * During initialization the HCE bit value changes from 1->0->1.
993  * When the host controller completes initialization sequence
994  * it sets the value of HCE bit to 1. The same HCE bit is read back
995  * to check if the controller has completed initialization sequence.
996  * So without this delay the value HCE = 1, set in the previous
997  * instruction might be read back.
998  * This delay can be changed based on the controller.
999  */
1000  msleep(1);
1001 
1002  /* wait for the host controller to complete initialization */
1003  retry = 10;
1004  while (ufshcd_is_hba_active(hba)) {
1005  if (retry) {
1006  retry--;
1007  } else {
1008  dev_err(&hba->pdev->dev,
1009  "Controller enable failed\n");
1010  return -EIO;
1011  }
1012  msleep(5);
1013  }
1014  return 0;
1015 }
1016 
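1017 /**
1018  * ufshcd_initialize_hba - start the initialization process
1019  * @hba: per adapter instance
1020  *
1021  * 1. Enable the controller via ufshcd_hba_enable
1022  * 2. Program the Transfer Request List Address with the starting
1023  * address of UTRDL
1024  * 3. Program the Task Management Request List Address with the
1025  * starting address of UTMRDL
1026  *
1027  * Returns 0 on success, non-zero value on failure
1028  */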
1029 static int ufshcd_initialize_hba(struct ufs_hba *hba)
1030 {
1031  if (ufshcd_hba_enable(hba))
1032  return -EIO;
1033 
1034  /* Configure UTRL and UTMRL base address registers */
1035  writel(lower_32_bits(hba->utrdl_dma_addr),
1036  (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
1037  writel(upper_32_bits(hba->utrdl_dma_addr),
1038  (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H));
1039  writel(lower_32_bits(hba->utmrdl_dma_addr),
1040  (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L));
1041  writel(upper_32_bits(hba->utmrdl_dma_addr),
1042  (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H));
1043 
1044  /* Initialize unipro link startup procedure */
1045  return ufshcd_dme_link_startup(hba);
1046 }
1047 
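1048 /**
1049  * ufshcd_do_reset - reset the host controller
1050  * @hba: per adapter instance
1051  *
1052  * Returns SUCCESS/FAILED
1053  */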
1054 static int ufshcd_do_reset(struct ufs_hba *hba)
1055 {
1056  struct ufshcd_lrb *lrbp;
1057  unsigned long flags;
1058  int tag;
1059 
1060  /* block commands from midlayer */
1061  scsi_block_requests(hba->host);
1062 
1063  spin_lock_irqsave(hba->host->host_lock, flags);
1064  hba->ufshcd_state = UFSHCD_STATE_RESET;
1065 
1066  /* send controller to reset state */
1067  ufshcd_hba_stop(hba);
1068  spin_unlock_irqrestore(hba->host->host_lock, flags);
1069 
1070  /* abort outstanding commands */
1071  for (tag = 0; tag < hba->nutrs; tag++) {
1072  if (test_bit(tag, &hba->outstanding_reqs)) {
1073  lrbp = &hba->lrb[tag];
1074  scsi_dma_unmap(lrbp->cmd);
1075  lrbp->cmd->result = DID_RESET << 16;
1076  lrbp->cmd->scsi_done(lrbp->cmd);
1077  lrbp->cmd = NULL;
1078  }
1079  }
1080 
1081  /* clear outstanding request/task bit maps */
1082  hba->outstanding_reqs = 0;
1083  hba->outstanding_tasks = 0;
1084 
1085  /* start the initialization process */
1086  if (ufshcd_initialize_hba(hba)) {
1087  dev_err(&hba->pdev->dev,
1088  "Reset: Controller initialization failed\n");
1089  return FAILED;
1090  }
1091  return SUCCESS;
1092 }
1093 
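1094 /**
1095  * ufshcd_slave_alloc - handle initial SCSI device configurations
1096  * @sdev: pointer to SCSI device
1097  *
1098  * Returns success
1099  */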
1100 static int ufshcd_slave_alloc(struct scsi_device *sdev)
1101 {
1102  struct ufs_hba *hba;
1103 
1104  hba = shost_priv(sdev->host);
1105  sdev->tagged_supported = 1;
1106 
1107  /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
1108  sdev->use_10_for_ms = 1;
1109  scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
1110 
1111  /*
1112  * Inform SCSI Midlayer that the LUN queue depth is same as the
1113  * controller queue depth. If a LUN queue depth is less than the
1114  * controller queue depth and if the LUN reports
1115  * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
1116  * with scsi_adjust_queue_depth.
1117  */
1118  scsi_activate_tcq(sdev, hba->nutrs);
1119  return 0;
1120 }
1121 
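1122 /**
1123  * ufshcd_slave_destroy - remove SCSI device configurations
1124  * @sdev: pointer to SCSI device
1125  */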
1126 static void ufshcd_slave_destroy(struct scsi_device *sdev)
1127 {
1128  struct ufs_hba *hba;
1129 
1130  hba = shost_priv(sdev->host);
1131  scsi_deactivate_tcq(sdev, hba->nutrs);
1132 }
1133 
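1134 /**
1135  * ufshcd_task_req_compl - handle task management request completion
1136  * @hba: per adapter instance
1137  * @index: index of the completed request
1138  *
1139  * Returns SUCCESS/FAILED
1140  */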
1141 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
1142 {
1143  struct utp_task_req_desc *task_req_descp;
1144  struct utp_upiu_task_rsp *task_rsp_upiup;
1145  unsigned long flags;
1146  int ocs_value;
1147  int task_result;
1148 
1149  spin_lock_irqsave(hba->host->host_lock, flags);
1150 
1151  /* Clear completed tasks from outstanding_tasks */
1152  __clear_bit(index, &hba->outstanding_tasks);
1153 
1154  task_req_descp = hba->utmrdl_base_addr;
1155  ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
1156 
1157  if (ocs_value == OCS_SUCCESS) {
1158  task_rsp_upiup = (struct utp_upiu_task_rsp *)
1159  task_req_descp[index].task_rsp_upiu;
1160  task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
1161  task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
1162 
1163  if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
1164  task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
1165  task_result = FAILED;
1166  else
1167  task_result = SUCCESS;
1168  } else {
1169  task_result = FAILED;
1170  dev_err(&hba->pdev->dev,
1171  "trc: Invalid ocs = %x\n", ocs_value);
1172  }
1173  spin_unlock_irqrestore(hba->host->host_lock, flags);
1174  return task_result;
1175 }
1176 
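1177 /**
1178  * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
1179  * SAM_STAT_TASK_SET_FULL SCSI command status.
1180  * @cmd: pointer to SCSI command
1181  */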
1182 static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
1183 {
1184  struct ufs_hba *hba;
1185  int i;
1186  int lun_qdepth = 0;
1187 
1188  hba = shost_priv(cmd->device->host);
1189 
1190  /*
1191  * LUN queue depth can be obtained by counting outstanding commands
1192  * on the LUN.
1193  */
1194  for (i = 0; i < hba->nutrs; i++) {
1195  if (test_bit(i, &hba->outstanding_reqs)) {
1196 
1197  /*
1198  * Check if the outstanding command belongs
1199  * to the LUN which reported SAM_STAT_TASK_SET_FULL.
1200  */
1201  if (cmd->device->lun == hba->lrb[i].lun)
1202  lun_qdepth++;
1203  }
1204  }
1205 
1206  /*
1207  * LUN queue depth will be total outstanding commands, except the
1208  * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
1209  */
1210  scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
1211 }
1212 
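1213 /**
1214  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
1215  * @lrbp: pointer to local reference block of completed command
1216  * @scsi_status: SCSI command status
1217  *
1218  * Returns value based on SCSI command status
1219  */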
1220 static inline int
1221 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
1222 {
1223  int result = 0;
1224 
1225  switch (scsi_status) {
1226  case SAM_STAT_GOOD:
1227  result |= DID_OK << 16 |
1228  COMMAND_COMPLETE << 8 |
1229  SAM_STAT_GOOD;
1230  break;
1231  case SAM_STAT_CHECK_CONDITION:
1232  result |= DID_OK << 16 |
1233  COMMAND_COMPLETE << 8 |
1234  SAM_STAT_CHECK_CONDITION;
1235  ufshcd_copy_sense_data(lrbp);
1236  break;
1237  case SAM_STAT_BUSY:
1238  result |= SAM_STAT_BUSY;
1239  break;
1240  case SAM_STAT_TASK_SET_FULL:
1241 
1242  /*
1243  * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
1244  * depth needs to be adjusted to the exact number of
1245  * outstanding commands the LUN can handle at any given time.
1246  */
1247  ufshcd_adjust_lun_qdepth(lrbp->cmd);
1248  result |= SAM_STAT_TASK_SET_FULL;
1249  break;
1250  case SAM_STAT_TASK_ABORTED:
1251  result |= SAM_STAT_TASK_ABORTED;
1252  break;
1253  default:
1254  result |= DID_ERROR << 16;
1255  break;
1256  } /* end of switch */
1257 
1258  return result;
1259 }
1260 
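1261 /**
1262  * ufshcd_transfer_rsp_status - Get overall status of the response
1263  * @hba: per adapter instance
1264  * @lrbp: pointer to local reference block of completed command
1265  *
1266  * Returns result of the command to notify SCSI midlayer
1267  */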
1268 static inline int
1269 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1270 {
1271  int result = 0;
1272  int scsi_status;
1273  int ocs;
1274 
1275  /* overall command status of utrd */
1276  ocs = ufshcd_get_tr_ocs(lrbp);
1277 
1278  switch (ocs) {
1279  case OCS_SUCCESS:
1280 
1281  /* check if the returned transfer response is valid */
1282  result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
1283  if (result) {
1284  dev_err(&hba->pdev->dev,
1285  "Invalid response = %x\n", result);
1286  break;
1287  }
1288 
1289  /*
1290  * get the response UPIU result to extract
1291  * the SCSI command status
1292  */
1293  result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
1294 
1295  /*
1296  * get the result based on SCSI status response
1297  * to notify the SCSI midlayer of the command status
1298  */
1299  scsi_status = result & MASK_SCSI_STATUS;
1300  result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
1301  break;
1302  case OCS_ABORTED:
1303  result |= DID_ABORT << 16;
1304  break;
1305  case OCS_INVALID_CMD_TABLE_ATTR:
1306  case OCS_INVALID_PRDT_ATTR:
1307  case OCS_MISMATCH_DATA_BUF_SIZE:
1308  case OCS_MISMATCH_RESP_UPIU_SIZE:
1309  case OCS_PEER_COMM_FAILURE:
1310  case OCS_FATAL_ERROR:
1311  default:
1312  result |= DID_ERROR << 16;
1313  dev_err(&hba->pdev->dev,
1314  "OCS error from controller = %x\n", ocs);
1315  break;
1316  } /* end of switch */
1317 
1318  return result;
1319 }
1320 
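1321 /**
1322  * ufshcd_transfer_req_compl - handle SCSI and query command completion
1323  * @hba: per adapter instance
1324  */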
1325 static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
1326 {
1327  struct ufshcd_lrb *lrb;
1328  unsigned long completed_reqs;
1329  u32 tr_doorbell;
1330  int result;
1331  int index;
1332 
1333  lrb = hba->lrb;
1334  tr_doorbell =
1335  readl(hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL);
1336  completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
1337 
1338  for (index = 0; index < hba->nutrs; index++) {
1339  if (test_bit(index, &completed_reqs)) {
1340 
1341  result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
1342 
1343  if (lrb[index].cmd) {
1344  scsi_dma_unmap(lrb[index].cmd);
1345  lrb[index].cmd->result = result;
1346  lrb[index].cmd->scsi_done(lrb[index].cmd);
1347 
1348  /* Mark completed command as NULL in LRB */
1349  lrb[index].cmd = NULL;
1350  }
1351  } /* end of if */
1352  } /* end of for */
1353 
1354  /* clear corresponding bits of completed commands */
1355  hba->outstanding_reqs ^= completed_reqs;
1356 
1357  /* Reset interrupt aggregation counters */
1358  ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
1359 }
1360 
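1361 /**
1362  * ufshcd_uic_cc_handler - handle UIC command completion
1363  * @work: pointer to a work queue structure
1364  *
1365  * Brings the controller to operational state on link startup completion
1366  */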
1367 static void ufshcd_uic_cc_handler(struct work_struct *work)
1368 {
1369  struct ufs_hba *hba;
1370 
1371  hba = container_of(work, struct ufs_hba, uic_workq);
1372 
1373  if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) &&
1374  !(ufshcd_get_uic_cmd_result(hba))) {
1375 
1376  if (ufshcd_make_hba_operational(hba))
1377  dev_err(&hba->pdev->dev,
1378  "cc: hba not operational state\n");
1379  return;
1380  }
1381 }
1382 
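1383 /**
1384  * ufshcd_fatal_err_handler - handle fatal errors
1385  * @work: pointer to work structure
1386  */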
1387 static void ufshcd_fatal_err_handler(struct work_struct *work)
1388 {
1389  struct ufs_hba *hba;
1390  hba = container_of(work, struct ufs_hba, feh_workq);
1391 
1392  /* check if reset is already in progress */
1393  if (hba->ufshcd_state != UFSHCD_STATE_RESET)
1394  ufshcd_do_reset(hba);
1395 }
1396 
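1397 /**
1398  * ufshcd_err_handler - Check for fatal errors and handle them
1399  * @hba: per adapter instance
1400  */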
1401 static void ufshcd_err_handler(struct ufs_hba *hba)
1402 {
1403  u32 reg;
1404 
1405  if (hba->errors & INT_FATAL_ERRORS)
1406  goto fatal_eh;
1407 
1408  if (hba->errors & UIC_ERROR) {
1409 
1410  reg = readl(hba->mmio_base +
1411  REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
1412  if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
1413  goto fatal_eh;
1414  }
1415  return;
1416 fatal_eh:
1417  hba->ufshcd_state = UFSHCD_STATE_ERROR;
1418  schedule_work(&hba->feh_workq);
1419 }
1420 
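1421 /**
1422  * ufshcd_tmc_handler - handle task management function completion
1423  * @hba: per adapter instance
1424  */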
1425 static void ufshcd_tmc_handler(struct ufs_hba *hba)
1426 {
1427  u32 tm_doorbell;
1428 
1429  tm_doorbell = readl(hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL);
1430  hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
1431  wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
1432 }
1433 
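1434 /**
1435  * ufshcd_sl_intr - Interrupt service routine
1436  * @hba: per adapter instance
1437  * @intr_status: contains interrupts generated by the controller
1438  */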
1439 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
1440 {
1441  hba->errors = UFSHCD_ERROR_MASK & intr_status;
1442  if (hba->errors)
1443  ufshcd_err_handler(hba);
1444 
1445  if (intr_status & UIC_COMMAND_COMPL)
1446  schedule_work(&hba->uic_workq);
1447 
1448  if (intr_status & UTP_TASK_REQ_COMPL)
1449  ufshcd_tmc_handler(hba);
1450 
1451  if (intr_status & UTP_TRANSFER_REQ_COMPL)
1452  ufshcd_transfer_req_compl(hba);
1453 }
1454 
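1455 /**
1456  * ufshcd_intr - Main interrupt service routine
1457  * @irq: irq number
1458  * @__hba: pointer to adapter instance
1459  *
1460  * Returns IRQ_HANDLED if the interrupt is valid
1461  * IRQ_NONE otherwise
1462  */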
1463 static irqreturn_t ufshcd_intr(int irq, void *__hba)
1464 {
1465  u32 intr_status;
1466  irqreturn_t retval = IRQ_NONE;
1467  struct ufs_hba *hba = __hba;
1468 
1469  spin_lock(hba->host->host_lock);
1470  intr_status = readl(hba->mmio_base + REG_INTERRUPT_STATUS);
1471 
1472  if (intr_status) {
1473  ufshcd_sl_intr(hba, intr_status);
1474 
1475  /* If UFSHCI 1.0 then clear interrupt status register */
1476  if (hba->ufs_version == UFSHCI_VERSION_10)
1477  writel(intr_status,
1478  (hba->mmio_base + REG_INTERRUPT_STATUS));
1479  retval = IRQ_HANDLED;
1480  }
1481  spin_unlock(hba->host->host_lock);
1482  return retval;
1483 }
1484 
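1485 /**
1486  * ufshcd_issue_tm_cmd - issues task management commands to controller
1487  * @hba: per adapter instance
1488  * @lrbp: pointer to local reference block
1489  * @tm_function: task management function opcode
1490  * Returns SUCCESS/FAILED
1491  */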
1492 static int
1493 ufshcd_issue_tm_cmd(struct ufs_hba *hba,
1494  struct ufshcd_lrb *lrbp,
1495  u8 tm_function)
1496 {
1497  struct utp_task_req_desc *task_req_descp;
1498  struct utp_upiu_task_req *task_req_upiup;
1499  struct Scsi_Host *host;
1500  unsigned long flags;
1501  int free_slot = 0;
1502  int err;
1503 
1504  host = hba->host;
1505 
1506  spin_lock_irqsave(host->host_lock, flags);
1507 
1508  /* If task management queue is full */
1509  free_slot = ufshcd_get_tm_free_slot(hba);
1510  if (free_slot >= hba->nutmrs) {
1511  spin_unlock_irqrestore(host->host_lock, flags);
1512  dev_err(&hba->pdev->dev, "Task management queue full\n");
1513  err = FAILED;
1514  goto out;
1515  }
1516 
1517  task_req_descp = hba->utmrdl_base_addr;
1518  task_req_descp += free_slot;
1519 
1520  /* Configure task request descriptor */
1521  task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
1522  task_req_descp->header.dword_2 =
1523  cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
1524 
1525  /* Configure task request UPIU */
1526  task_req_upiup =
1527  (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
1528  task_req_upiup->header.dword_0 =
1529  cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
1530  lrbp->lun, lrbp->task_tag));
1531  task_req_upiup->header.dword_1 =
1532  cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
1533 
1534  task_req_upiup->input_param1 = lrbp->lun;
1535  task_req_upiup->input_param1 =
1536  cpu_to_be32(task_req_upiup->input_param1);
1537  task_req_upiup->input_param2 = lrbp->task_tag;
1538  task_req_upiup->input_param2 =
1539  cpu_to_be32(task_req_upiup->input_param2);
1540 
1541  /* send command to the controller */
1542  __set_bit(free_slot, &hba->outstanding_tasks);
1543  writel((1 << free_slot),
1544  (hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL));
1545 
1546  spin_unlock_irqrestore(host->host_lock, flags);
1547 
1548  /* wait until the task management command is completed */
1549  err =
1550  wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
1551  (test_bit(free_slot,
1552  &hba->tm_condition) != 0),
1553  60 * HZ);
1554  if (!err) {
1555  dev_err(&hba->pdev->dev,
1556  "Task management command timed-out\n");
1557  err = FAILED;
1558  goto out;
1559  }
1560  clear_bit(free_slot, &hba->tm_condition);
1561  err = ufshcd_task_req_compl(hba, free_slot);
1562 out:
1563  return err;
1564 }
1565 
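1566 /**
1567  * ufshcd_device_reset - reset device and abort all the pending commands
1568  * @cmd: SCSI command pointer
1569  *
1570  * Returns SUCCESS/FAILED
1571  */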
1572 static int ufshcd_device_reset(struct scsi_cmnd *cmd)
1573 {
1574  struct Scsi_Host *host;
1575  struct ufs_hba *hba;
1576  unsigned int tag;
1577  u32 pos;
1578  int err;
1579 
1580  host = cmd->device->host;
1581  hba = shost_priv(host);
1582  tag = cmd->request->tag;
1583 
1584  err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
1585  if (err == FAILED)
1586  goto out;
1587 
1588  for (pos = 0; pos < hba->nutrs; pos++) {
1589  if (test_bit(pos, &hba->outstanding_reqs) &&
1590  (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
1591 
1592  /* clear the respective UTRLCLR register bit */
1593  ufshcd_utrl_clear(hba, pos);
1594 
1595  clear_bit(pos, &hba->outstanding_reqs);
1596 
1597  if (hba->lrb[pos].cmd) {
1598  scsi_dma_unmap(hba->lrb[pos].cmd);
1599  hba->lrb[pos].cmd->result =
1600  DID_ABORT << 16;
1601  hba->lrb[pos].cmd->scsi_done(hba->lrb[pos].cmd);
1602  hba->lrb[pos].cmd = NULL;
1603  }
1604  }
1605  } /* end of for */
1606 out:
1607  return err;
1608 }
1609 
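1610 /**
1611  * ufshcd_host_reset - Main reset function registered with scsi layer
1612  * @cmd: SCSI command pointer
1613  *
1614  * Returns SUCCESS/FAILED
1615  */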
1616 static int ufshcd_host_reset(struct scsi_cmnd *cmd)
1617 {
1618  struct ufs_hba *hba;
1619 
1620  hba = shost_priv(cmd->device->host);
1621 
1622  if (hba->ufshcd_state == UFSHCD_STATE_RESET)
1623  return SUCCESS;
1624 
1625  return ufshcd_do_reset(hba);
1626 }
1627 
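1628 /**
1629  * ufshcd_abort - abort a specific command
1630  * @cmd: SCSI command pointer
1631  *
1632  * Returns SUCCESS/FAILED
1633  */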
1634 static int ufshcd_abort(struct scsi_cmnd *cmd)
1635 {
1636  struct Scsi_Host *host;
1637  struct ufs_hba *hba;
1638  unsigned long flags;
1639  unsigned int tag;
1640  int err;
1641 
1642  host = cmd->device->host;
1643  hba = shost_priv(host);
1644  tag = cmd->request->tag;
1645 
1646  spin_lock_irqsave(host->host_lock, flags);
1647 
1648  /* check if command is still pending */
1649  if (!(test_bit(tag, &hba->outstanding_reqs))) {
1650  err = FAILED;
1651  spin_unlock_irqrestore(host->host_lock, flags);
1652  goto out;
1653  }
1654  spin_unlock_irqrestore(host->host_lock, flags);
1655 
1656  err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
1657  if (err == FAILED)
1658  goto out;
1659 
1660  scsi_dma_unmap(cmd);
1661 
1662  spin_lock_irqsave(host->host_lock, flags);
1663 
1664  /* clear the respective UTRLCLR register bit */
1665  ufshcd_utrl_clear(hba, tag);
1666 
1667  __clear_bit(tag, &hba->outstanding_reqs);
1668  hba->lrb[tag].cmd = NULL;
1669  spin_unlock_irqrestore(host->host_lock, flags);
1670 out:
1671  return err;
1672 }
1673 
1674 static struct scsi_host_template ufshcd_driver_template = {
1675  .module = THIS_MODULE,
1676  .name = UFSHCD,
1677  .proc_name = UFSHCD,
1678  .queuecommand = ufshcd_queuecommand,
1679  .slave_alloc = ufshcd_slave_alloc,
1680  .slave_destroy = ufshcd_slave_destroy,
1681  .eh_abort_handler = ufshcd_abort,
1682  .eh_device_reset_handler = ufshcd_device_reset,
1683  .eh_host_reset_handler = ufshcd_host_reset,
1684  .this_id = -1,
1685  .sg_tablesize = SG_ALL,
1686  .cmd_per_lun = UFSHCD_CMD_PER_LUN,
1687  .can_queue = UFSHCD_CAN_QUEUE,
1688 };
1689 
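1690 /**
1691  * ufshcd_shutdown - main function to put the controller in reset state
1692  * @pdev: pointer to PCI device handle
1693  */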
1694 static void ufshcd_shutdown(struct pci_dev *pdev)
1695 {
1696  ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
1697 }
1698 
1699 #ifdef CONFIG_PM
1700 
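1701 /**
1702  * ufshcd_suspend - suspend power management function
1703  * @pdev: pointer to PCI device handle
1704  * @state: power state
1705  * Returns -ENOSYS
1706  */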
1707 static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
1708 {
1709  /*
1710  * TODO:
1711  * 1. Block SCSI requests from SCSI midlayer
1712  * 2. Change the internal driver state to non operational
1713  * 3. Set UTRLRSR and UTMRLRSR bits to zero
1714  * 4. Wait until outstanding commands are completed
1715  * 5. Set HCE to zero to send the UFS host controller to reset state
1716  */
1717 
1718  return -ENOSYS;
1719 }
1720 
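1721 /**
1722  * ufshcd_resume - resume power management function
1723  * @pdev: pointer to PCI device handle
1724  *
1725  * Returns -ENOSYS
1726  */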
1727 static int ufshcd_resume(struct pci_dev *pdev)
1728 {
1729  /*
1730  * TODO:
1731  * 1. Set HCE to 1, to start the UFS host controller
1732  * initialization process
1733  * 2. Set UTRLRSR and UTMRLRSR bits to 1
1734  * 3. Change the internal driver state to operational
1735  * 4. Unblock SCSI requests from SCSI midlayer
1736  */
1737 
1738  return -ENOSYS;
1739 }
1740 #endif /* CONFIG_PM */
1741 
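1742 /**
1743  * ufshcd_hba_free - free allocated memory for
1744  * host memory space data structures
1745  * @hba: per adapter instance
1746  */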
1747 static void ufshcd_hba_free(struct ufs_hba *hba)
1748 {
1749  iounmap(hba->mmio_base);
1750  ufshcd_free_hba_memory(hba);
1751  pci_release_regions(hba->pdev);
1752 }
1753 
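1754 /**
1755  * ufshcd_remove - de-allocate PCI/SCSI host and host memory space
1756  * data structure memory
1757  * @pdev: pointer to PCI handle
1758  */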
1759 static void ufshcd_remove(struct pci_dev *pdev)
1760 {
1761  struct ufs_hba *hba = pci_get_drvdata(pdev);
1762 
1763  /* disable interrupts */
1764  ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
1765  free_irq(pdev->irq, hba);
1766 
1767  ufshcd_hba_stop(hba);
1768  ufshcd_hba_free(hba);
1769 
1770  scsi_remove_host(hba->host);
1771  scsi_host_put(hba->host);
1772  pci_set_drvdata(pdev, NULL);
1773  pci_clear_master(pdev);
1774  pci_disable_device(pdev);
1775 }
1776 
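1777 /**
1778  * ufshcd_set_dma_mask - Set dma mask based on the controller
1779  * addressing capability
1780  * @hba: per adapter instance
1781  *
1782  * Returns 0 for success, non-zero for failure
1783  */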
1784 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
1785 {
1786  int err;
1787  u64 dma_mask;
1788 
1789  /*
1790  * If controller supports 64 bit addressing mode, then set the DMA
1791  * mask to 64-bit, else set the DMA mask to 32-bit
1792  */
1793  if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
1794  dma_mask = DMA_BIT_MASK(64);
1795  else
1796  dma_mask = DMA_BIT_MASK(32);
1797 
1798  err = pci_set_dma_mask(hba->pdev, dma_mask);
1799  if (err)
1800  return err;
1801 
1802  err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);
1803 
1804  return err;
1805 }
1806 
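1807 /**
1808  * ufshcd_probe - probe routine of the driver
1809  * @pdev: pointer to PCI device handle
1810  * @id: PCI device id
1811  *
1812  * Returns 0 on success, non-zero value on failure
1813  */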
1814 static int __devinit
1815 ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1816 {
1817  struct Scsi_Host *host;
1818  struct ufs_hba *hba;
1819  int err;
1820 
1821  err = pci_enable_device(pdev);
1822  if (err) {
1823  dev_err(&pdev->dev, "pci_enable_device failed\n");
1824  goto out_error;
1825  }
1826 
1827  pci_set_master(pdev);
1828 
1829  host = scsi_host_alloc(&ufshcd_driver_template,
1830  sizeof(struct ufs_hba));
1831  if (!host) {
1832  dev_err(&pdev->dev, "scsi_host_alloc failed\n");
1833  err = -ENOMEM;
1834  goto out_disable;
1835  }
1836  hba = shost_priv(host);
1837 
1838  err = pci_request_regions(pdev, UFSHCD);
1839  if (err < 0) {
1840  dev_err(&pdev->dev, "request regions failed\n");
1841  goto out_host_put;
1842  }
1843 
1844  hba->mmio_base = pci_ioremap_bar(pdev, 0);
1845  if (!hba->mmio_base) {
1846  dev_err(&pdev->dev, "memory map failed\n");
1847  err = -ENOMEM;
1848  goto out_release_regions;
1849  }
1850 
1851  hba->host = host;
1852  hba->pdev = pdev;
1853 
1854  /* Read capabilities registers */
1855  ufshcd_hba_capabilities(hba);
1856 
1857  /* Get UFS version supported by the controller */
1858  hba->ufs_version = ufshcd_get_ufs_version(hba);
1859 
1860  err = ufshcd_set_dma_mask(hba);
1861  if (err) {
1862  dev_err(&pdev->dev, "set dma mask failed\n");
1863  goto out_iounmap;
1864  }
1865 
1866  /* Allocate memory for host memory space */
1867  err = ufshcd_memory_alloc(hba);
1868  if (err) {
1869  dev_err(&pdev->dev, "Memory allocation failed\n");
1870  goto out_iounmap;
1871  }
1872 
1873  /* Configure LRB */
1874  ufshcd_host_memory_configure(hba);
1875 
1876  host->can_queue = hba->nutrs;
1877  host->cmd_per_lun = hba->nutrs;
1878  host->max_id = UFSHCD_MAX_ID;
1879  host->max_lun = UFSHCD_MAX_LUNS;
1880  host->max_channel = UFSHCD_MAX_CHANNEL;
1881  host->unique_id = host->host_no;
1882  host->max_cmd_len = MAX_CDB_SIZE;
1883 
1884  /* Initialize wait queue for task management */
1885  init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
1886 
1887  /* Initialize work queues */
1888  INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler);
1889  INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
1890 
1891  /* IRQ registration */
1892  err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
1893  if (err) {
1894  dev_err(&pdev->dev, "request irq failed\n");
1895  goto out_lrb_free;
1896  }
1897 
1898  /* Enable SCSI tag mapping */
1899  err = scsi_init_shared_tag_map(host, host->can_queue);
1900  if (err) {
1901  dev_err(&pdev->dev, "init shared queue failed\n");
1902  goto out_free_irq;
1903  }
1904 
1905  pci_set_drvdata(pdev, hba);
1906 
1907  err = scsi_add_host(host, &pdev->dev);
1908  if (err) {
1909  dev_err(&pdev->dev, "scsi_add_host failed\n");
1910  goto out_free_irq;
1911  }
1912 
1913  /* Initialization routine */
1914  err = ufshcd_initialize_hba(hba);
1915  if (err) {
1916  dev_err(&pdev->dev, "Initialization failed\n");
1917  goto out_free_irq;
1918  }
1919 
1920  return 0;
1921 
1922 out_free_irq:
1923  free_irq(pdev->irq, hba);
1924 out_lrb_free:
1925  ufshcd_free_hba_memory(hba);
1926 out_iounmap:
1927  iounmap(hba->mmio_base);
1928 out_release_regions:
1929  pci_release_regions(pdev);
1930 out_host_put:
1931  scsi_host_put(host);
1932 out_disable:
1933  pci_clear_master(pdev);
1934  pci_disable_device(pdev);
1935 out_error:
1936  return err;
1937 }
1938 
1939 static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
1940  { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
1941  { } /* terminate list */
1942 };
1943 
1944 MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
1945 
1946 static struct pci_driver ufshcd_pci_driver = {
1947  .name = UFSHCD,
1948  .id_table = ufshcd_pci_tbl,
1949  .probe = ufshcd_probe,
1950  .remove = __devexit_p(ufshcd_remove),
1951  .shutdown = ufshcd_shutdown,
1952 #ifdef CONFIG_PM
1953  .suspend = ufshcd_suspend,
1954  .resume = ufshcd_resume,
1955 #endif
1956 };
1957 
1958 module_pci_driver(ufshcd_pci_driver);
1959 
1960 MODULE_AUTHOR("Santosh Yaraganavi <[email protected]>, "
1961  "Vinayak Holikatti <[email protected]>");
1962 MODULE_DESCRIPTION("Generic UFS host controller driver");
1963 MODULE_LICENSE("GPL");