Linux Kernel  3.7.1
mv_sas.c
1 /*
2  * Marvell 88SE64xx/88SE94xx main function
3  *
4  * Copyright 2007 Red Hat, Inc.
5  * Copyright 2008 Marvell. <[email protected]>
6  * Copyright 2009-2011 Marvell. <[email protected]>
7  *
8  * This file is licensed under GPLv2.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License as
12  * published by the Free Software Foundation; version 2 of the
13  * License.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
23  * USA
24 */
25 
26 #include "mv_sas.h"
27 
28 static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
29 {
30  if (task->lldd_task) {
31  struct mvs_slot_info *slot;
32  slot = task->lldd_task;
33  *tag = slot->slot_tag;
34  return 1;
35  }
36  return 0;
37 }
38 
39 void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
40 {
41  void *bitmap = mvi->tags;
42  clear_bit(tag, bitmap);
43 }
44 
45 void mvs_tag_free(struct mvs_info *mvi, u32 tag)
46 {
47  mvs_tag_clear(mvi, tag);
48 }
49 
50 void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
51 {
52  void *bitmap = mvi->tags;
53  set_bit(tag, bitmap);
54 }
55 
56 inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
57 {
58  unsigned int index, tag;
59  void *bitmap = mvi->tags;
60 
61  index = find_first_zero_bit(bitmap, mvi->tags_num);
62  tag = index;
63  if (tag >= mvi->tags_num)
64  return -SAS_QUEUE_FULL;
65  mvs_tag_set(mvi, tag);
66  *tag_out = tag;
67  return 0;
68 }
69 
70 void mvs_tag_init(struct mvs_info *mvi)
71 {
72  int i;
73  for (i = 0; i < mvi->tags_num; ++i)
74  mvs_tag_clear(mvi, i);
75 }
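
Annotation: the tag helpers above form a simple bitmap allocator: find_first_zero_bit() picks the lowest free slot tag, mvs_tag_set()/mvs_tag_clear() mark it busy or free, and allocation fails with -SAS_QUEUE_FULL once the bitmap is exhausted. A minimal user-space sketch of the same scheme (the demo_* names and the 64-tag budget are illustrative, not part of the driver):

#include <stdio.h>

#define DEMO_TAGS_NUM 64

static unsigned long long demo_bitmap;           /* bit i set => tag i busy */

static int demo_tag_alloc(unsigned int *tag_out)
{
	unsigned int i;
	for (i = 0; i < DEMO_TAGS_NUM; i++) {
		if (!(demo_bitmap & (1ULL << i))) {  /* find_first_zero_bit() */
			demo_bitmap |= 1ULL << i;    /* mvs_tag_set() */
			*tag_out = i;
			return 0;
		}
	}
	return -1;                                   /* queue full */
}

static void demo_tag_free(unsigned int tag)
{
	demo_bitmap &= ~(1ULL << tag);               /* mvs_tag_clear() */
}

int main(void)
{
	unsigned int t;
	if (demo_tag_alloc(&t) == 0)
		printf("allocated tag %u\n", t);     /* prints 0 */
	demo_tag_free(t);
	return 0;
}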
76 
77 static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
78 {
79  unsigned long i = 0, j = 0, hi = 0;
80  struct sas_ha_struct *sha = dev->port->ha;
81  struct mvs_info *mvi = NULL;
82  struct asd_sas_phy *phy;
83 
84  while (sha->sas_port[i]) {
85  if (sha->sas_port[i] == dev->port) {
86  phy = container_of(sha->sas_port[i]->phy_list.next,
87  struct asd_sas_phy, port_phy_el);
88  j = 0;
89  while (sha->sas_phy[j]) {
90  if (sha->sas_phy[j] == phy)
91  break;
92  j++;
93  }
94  break;
95  }
96  i++;
97  }
98  hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
99  mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
100 
101  return mvi;
102 
103 }
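
Annotation: mvs_find_dev_mvi() locates the phy backing the device's port, then maps the global libsas phy index to the owning controller with hi = j/n_phy. A tiny sketch of that index arithmetic, with assumed values:

#include <stdio.h>

int main(void)
{
	int n_phy = 8;          /* assumed phys per controller */
	int j = 11;             /* hypothetical global phy index */

	/* controller index and controller-local phy number */
	printf("controller %d, local phy %d\n", j / n_phy, j % n_phy);
	return 0;
}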
104 
105 int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
106 {
107  unsigned long i = 0, j = 0, n = 0, num = 0;
108  struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
109  struct mvs_info *mvi = mvi_dev->mvi_info;
110  struct sas_ha_struct *sha = dev->port->ha;
111 
112  while (sha->sas_port[i]) {
113  if (sha->sas_port[i] == dev->port) {
114  struct asd_sas_phy *phy;
115  list_for_each_entry(phy,
116  &sha->sas_port[i]->phy_list, port_phy_el) {
117  j = 0;
118  while (sha->sas_phy[j]) {
119  if (sha->sas_phy[j] == phy)
120  break;
121  j++;
122  }
123  phyno[n] = (j >= mvi->chip->n_phy) ?
124  (j - mvi->chip->n_phy) : j;
125  num++;
126  n++;
127  }
128  break;
129  }
130  i++;
131  }
132  return num;
133 }
134 
135 static struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
136  u8 reg_set)
137 {
138  u32 dev_no;
139  for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
140  if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
141  continue;
142 
143  if (mvi->devices[dev_no].taskfileset == reg_set)
144  return &mvi->devices[dev_no];
145  }
146  return NULL;
147 }
148 
149 static inline void mvs_free_reg_set(struct mvs_info *mvi,
150  struct mvs_device *dev)
151 {
152  if (!dev) {
153  mv_printk("device has been free.\n");
154  return;
155  }
156  if (dev->taskfileset == MVS_ID_NOT_MAPPED)
157  return;
158  MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
159 }
160 
161 static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
162  struct mvs_device *dev)
163 {
164  if (dev->taskfileset != MVS_ID_NOT_MAPPED)
165  return 0;
166  return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
167 }
168 
169 void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
170 {
171  u32 no;
172  for_each_phy(phy_mask, phy_mask, no) {
173  if (!(phy_mask & 1))
174  continue;
175  MVS_CHIP_DISP->phy_reset(mvi, no, hard);
176  }
177 }
178 
179 int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
180  void *funcdata)
181 {
182  int rc = 0, phy_id = sas_phy->id;
183  u32 tmp, i = 0, hi;
184  struct sas_ha_struct *sha = sas_phy->ha;
185  struct mvs_info *mvi = NULL;
186 
187  while (sha->sas_phy[i]) {
188  if (sha->sas_phy[i] == sas_phy)
189  break;
190  i++;
191  }
192  hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
193  mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
194 
195  switch (func) {
196  case PHY_FUNC_SET_LINK_RATE:
197  MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
198  break;
199 
200  case PHY_FUNC_HARD_RESET:
201  tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
202  if (tmp & PHY_RST_HARD)
203  break;
204  MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
205  break;
206 
207  case PHY_FUNC_LINK_RESET:
208  MVS_CHIP_DISP->phy_enable(mvi, phy_id);
209  MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
210  break;
211 
212  case PHY_FUNC_DISABLE:
213  MVS_CHIP_DISP->phy_disable(mvi, phy_id);
214  break;
215 
216  default:
217  rc = -ENOSYS;
218  }
219  msleep(200);
220  return rc;
221 }
222 
223 void mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
224  u32 off_lo, u32 off_hi, u64 sas_addr)
225 {
226  u32 lo = (u32)sas_addr;
227  u32 hi = (u32)(sas_addr>>32);
228 
229  MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
230  MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
231  MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
232  MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
233 }
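
Annotation: mvs_set_sas_addr() programs the 64-bit SAS address as two 32-bit halves through the port-configuration window. A standalone sketch of the split (the WWN below is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sas_addr = 0x5005043011AB0001ULL;  /* hypothetical WWN */
	uint32_t lo = (uint32_t)sas_addr;           /* low dword  -> off_lo */
	uint32_t hi = (uint32_t)(sas_addr >> 32);   /* high dword -> off_hi */

	printf("lo=0x%08x hi=0x%08x\n", lo, hi);
	return 0;
}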
234 
235 static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
236 {
237  struct mvs_phy *phy = &mvi->phy[i];
238  struct asd_sas_phy *sas_phy = &phy->sas_phy;
239  struct sas_ha_struct *sas_ha;
240  if (!phy->phy_attached)
241  return;
242 
243  if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
244  && phy->phy_type & PORT_TYPE_SAS) {
245  return;
246  }
247 
248  sas_ha = mvi->sas;
249  sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
250 
251  if (sas_phy->phy) {
252  struct sas_phy *sphy = sas_phy->phy;
253 
254  sphy->negotiated_linkrate = sas_phy->linkrate;
255  sphy->minimum_linkrate = phy->minimum_linkrate;
256  sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
257  sphy->maximum_linkrate = phy->maximum_linkrate;
258  sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
259  }
260 
261  if (phy->phy_type & PORT_TYPE_SAS) {
262  struct sas_identify_frame *id;
263 
264  id = (struct sas_identify_frame *)phy->frame_rcvd;
265  id->dev_type = phy->identify.device_type;
266  id->initiator_bits = SAS_PROTOCOL_ALL;
267  id->target_bits = phy->identify.target_port_protocols;
268 
269  /* direct attached SAS device */
270  if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
271  MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
272  MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00);
273  }
274  } else if (phy->phy_type & PORT_TYPE_SATA) {
275  /*Nothing*/
276  }
277  mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);
278 
279  sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
280 
281  mvi->sas->notify_port_event(sas_phy,
282  PORTE_BYTES_DMAED);
283 }
284 
285 void mvs_scan_start(struct Scsi_Host *shost)
286 {
287  int i, j;
288  unsigned short core_nr;
289  struct mvs_info *mvi;
290  struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
291  struct mvs_prv_info *mvs_prv = sha->lldd_ha;
292 
293  core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
294 
295  for (j = 0; j < core_nr; j++) {
296  mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
297  for (i = 0; i < mvi->chip->n_phy; ++i)
298  mvs_bytes_dmaed(mvi, i);
299  }
300  mvs_prv->scan_finished = 1;
301 }
302 
303 int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
304 {
305  struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
306  struct mvs_prv_info *mvs_prv = sha->lldd_ha;
307 
308  if (mvs_prv->scan_finished == 0)
309  return 0;
310 
311  sas_drain_work(sha);
312  return 1;
313 }
314 
315 static int mvs_task_prep_smp(struct mvs_info *mvi,
316  struct mvs_task_exec_info *tei)
317 {
318  int elem, rc, i;
319  struct sas_task *task = tei->task;
320  struct mvs_cmd_hdr *hdr = tei->hdr;
321  struct domain_device *dev = task->dev;
322  struct asd_sas_port *sas_port = dev->port;
323  struct scatterlist *sg_req, *sg_resp;
324  u32 req_len, resp_len, tag = tei->tag;
325  void *buf_tmp;
326  u8 *buf_oaf;
327  dma_addr_t buf_tmp_dma;
328  void *buf_prd;
329  struct mvs_slot_info *slot = &mvi->slot_info[tag];
330  u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
331 
332  /*
333  * DMA-map SMP request, response buffers
334  */
335  sg_req = &task->smp_task.smp_req;
336  elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
337  if (!elem)
338  return -ENOMEM;
339  req_len = sg_dma_len(sg_req);
340 
341  sg_resp = &task->smp_task.smp_resp;
342  elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
343  if (!elem) {
344  rc = -ENOMEM;
345  goto err_out;
346  }
347  resp_len = SB_RFB_MAX;
348 
349  /* must be in dwords */
350  if ((req_len & 0x3) || (resp_len & 0x3)) {
351  rc = -EINVAL;
352  goto err_out_2;
353  }
354 
355  /*
356  * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
357  */
358 
359  /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
360  buf_tmp = slot->buf;
361  buf_tmp_dma = slot->buf_dma;
362 
363  hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
364 
365  /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
366  buf_oaf = buf_tmp;
367  hdr->open_frame = cpu_to_le64(buf_tmp_dma);
368 
369  buf_tmp += MVS_OAF_SZ;
370  buf_tmp_dma += MVS_OAF_SZ;
371 
372  /* region 3: PRD table *********************************** */
373  buf_prd = buf_tmp;
374  if (tei->n_elem)
375  hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
376  else
377  hdr->prd_tbl = 0;
378 
379  i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
380  buf_tmp += i;
381  buf_tmp_dma += i;
382 
383  /* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
384  slot->response = buf_tmp;
385  hdr->status_buf = cpu_to_le64(buf_tmp_dma);
386  if (mvi->flags & MVF_FLAG_SOC)
387  hdr->reserved[0] = 0;
388 
389  /*
390  * Fill in TX ring and command slot header
391  */
392  slot->tx = mvi->tx_prod;
393  mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
394  TXQ_MODE_I | tag |
395  (sas_port->phy_mask << TXQ_PHY_SHIFT));
396 
397  hdr->flags |= flags;
398  hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
399  hdr->tags = cpu_to_le32(tag);
400  hdr->data_len = 0;
401 
402  /* generate open address frame hdr (first 12 bytes) */
403  /* initiator, SMP, ftype 1h */
404  buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
405  buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
406  *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
407  memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
408 
409  /* fill in PRD (scatter/gather) table, if any */
410  MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
411 
412  return 0;
413 
414 err_out_2:
415  dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
416  PCI_DMA_FROMDEVICE);
417 err_out:
418  dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
419  PCI_DMA_TODEVICE);
420  return rc;
421 }
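
Annotation: each prep routine carves the single MVS_SLOT_BUF_SZ DMA buffer into consecutive regions: command table and/or open address frame, then the PRD table, with the status buffer taking whatever remains. A sketch of the offset arithmetic, using stand-in sizes rather than the chip's real constants:

#include <stdio.h>

#define DEMO_OAF_SZ 64          /* stand-in for MVS_OAF_SZ */
#define DEMO_PRD_SZ 16          /* stand-in for MVS_CHIP_DISP->prd_size() */

int main(void)
{
	unsigned long buf = 0;                  /* offset of slot->buf */
	unsigned long oaf = buf;                /* open address frame */
	unsigned long prd = oaf + DEMO_OAF_SZ;  /* PRD table follows */
	int n_elem = 4;                         /* hypothetical S/G count */
	unsigned long status = prd + DEMO_PRD_SZ * n_elem; /* what's left */

	printf("oaf@+%lu prd@+%lu status@+%lu\n", oaf, prd, status);
	return 0;
}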
422 
423 static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
424 {
425  struct ata_queued_cmd *qc = task->uldd_task;
426 
427  if (qc) {
428  if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
429  qc->tf.command == ATA_CMD_FPDMA_READ) {
430  *tag = qc->tag;
431  return 1;
432  }
433  }
434 
435  return 0;
436 }
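
Annotation: when this returns an NCQ tag, mvs_task_prep_ata() folds it into the FIS sector-count field with sector_count |= hdr_tag << 3, because FPDMA commands carry the queue tag in bits 7:3 of that byte. A sketch of the packing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t sector_count = 0;
	uint32_t ncq_tag = 9;                     /* hypothetical queue tag */

	sector_count |= (uint8_t)(ncq_tag << 3);  /* tag into bits 7:3 */
	printf("sector_count=0x%02x, tag recovered=%u\n",
	       sector_count, (sector_count >> 3) & 0x1f);
	return 0;
}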
437 
438 static int mvs_task_prep_ata(struct mvs_info *mvi,
439  struct mvs_task_exec_info *tei)
440 {
441  struct sas_task *task = tei->task;
442  struct domain_device *dev = task->dev;
443  struct mvs_device *mvi_dev = dev->lldd_dev;
444  struct mvs_cmd_hdr *hdr = tei->hdr;
445  struct asd_sas_port *sas_port = dev->port;
446  struct mvs_slot_info *slot;
447  void *buf_prd;
448  u32 tag = tei->tag, hdr_tag;
449  u32 flags, del_q;
450  void *buf_tmp;
451  u8 *buf_cmd, *buf_oaf;
452  dma_addr_t buf_tmp_dma;
453  u32 i, req_len, resp_len;
454  const u32 max_resp_len = SB_RFB_MAX;
455 
456  if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
457  mv_dprintk("No free register set for dev %d.\n",
458  mvi_dev->device_id);
459  return -EBUSY;
460  }
461  slot = &mvi->slot_info[tag];
462  slot->tx = mvi->tx_prod;
463  del_q = TXQ_MODE_I | tag |
464  (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
465  (sas_port->phy_mask << TXQ_PHY_SHIFT) |
466  (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
467  mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
468 
469  if (task->data_dir == DMA_FROM_DEVICE)
470  flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
471  else
472  flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
473 
474  if (task->ata_task.use_ncq)
475  flags |= MCH_FPDMA;
476  if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
477  if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
478  flags |= MCH_ATAPI;
479  }
480 
481  hdr->flags = cpu_to_le32(flags);
482 
483  if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
484  task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
485  else
486  hdr_tag = tag;
487 
488  hdr->tags = cpu_to_le32(hdr_tag);
489 
490  hdr->data_len = cpu_to_le32(task->total_xfer_len);
491 
492  /*
493  * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
494  */
495 
496  /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
497  buf_cmd = buf_tmp = slot->buf;
498  buf_tmp_dma = slot->buf_dma;
499 
500  hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
501 
502  buf_tmp += MVS_ATA_CMD_SZ;
503  buf_tmp_dma += MVS_ATA_CMD_SZ;
504 
505  /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
506  /* used for STP. unused for SATA? */
507  buf_oaf = buf_tmp;
508  hdr->open_frame = cpu_to_le64(buf_tmp_dma);
509 
510  buf_tmp += MVS_OAF_SZ;
511  buf_tmp_dma += MVS_OAF_SZ;
512 
513  /* region 3: PRD table ********************************************* */
514  buf_prd = buf_tmp;
515 
516  if (tei->n_elem)
517  hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
518  else
519  hdr->prd_tbl = 0;
520  i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
521 
522  buf_tmp += i;
523  buf_tmp_dma += i;
524 
525  /* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
526  slot->response = buf_tmp;
527  hdr->status_buf = cpu_to_le64(buf_tmp_dma);
528  if (mvi->flags & MVF_FLAG_SOC)
529  hdr->reserved[0] = 0;
530 
531  req_len = sizeof(struct host_to_dev_fis);
532  resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
533  sizeof(struct mvs_err_info) - i;
534 
535  /* request, response lengths */
536  resp_len = min(resp_len, max_resp_len);
537  hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
538 
539  if (likely(!task->ata_task.device_control_reg_update))
540  task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
541  /* fill in command FIS and ATAPI CDB */
542  memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
543  if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
544  memcpy(buf_cmd + STP_ATAPI_CMD,
545  task->ata_task.atapi_packet, 16);
546 
547  /* generate open address frame hdr (first 12 bytes) */
548  /* initiator, STP, ftype 1h */
549  buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
550  buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
551  *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
552  memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
553 
554  /* fill in PRD (scatter/gather) table, if any */
555  MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
556 
557  if (task->data_dir == DMA_FROM_DEVICE)
558  MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
559  TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
560 
561  return 0;
562 }
563 
564 static int mvs_task_prep_ssp(struct mvs_info *mvi,
565  struct mvs_task_exec_info *tei, int is_tmf,
566  struct mvs_tmf_task *tmf)
567 {
568  struct sas_task *task = tei->task;
569  struct mvs_cmd_hdr *hdr = tei->hdr;
570  struct mvs_port *port = tei->port;
571  struct domain_device *dev = task->dev;
572  struct mvs_device *mvi_dev = dev->lldd_dev;
573  struct asd_sas_port *sas_port = dev->port;
574  struct mvs_slot_info *slot;
575  void *buf_prd;
576  struct ssp_frame_hdr *ssp_hdr;
577  void *buf_tmp;
578  u8 *buf_cmd, *buf_oaf, fburst = 0;
579  dma_addr_t buf_tmp_dma;
580  u32 flags;
581  u32 resp_len, req_len, i, tag = tei->tag;
582  const u32 max_resp_len = SB_RFB_MAX;
583  u32 phy_mask;
584 
585  slot = &mvi->slot_info[tag];
586 
587  phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
588  sas_port->phy_mask) & TXQ_PHY_MASK;
589 
590  slot->tx = mvi->tx_prod;
591  mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
592  (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
593  (phy_mask << TXQ_PHY_SHIFT));
594 
595  flags = MCH_RETRY;
596  if (task->ssp_task.enable_first_burst) {
597  flags |= MCH_FBURST;
598  fburst = (1 << 7);
599  }
600  if (is_tmf)
601  flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
602  else
603  flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
604 
605  hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
606  hdr->tags = cpu_to_le32(tag);
607  hdr->data_len = cpu_to_le32(task->total_xfer_len);
608 
609  /*
610  * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
611  */
612 
613  /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
614  buf_cmd = buf_tmp = slot->buf;
615  buf_tmp_dma = slot->buf_dma;
616 
617  hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
618 
619  buf_tmp += MVS_SSP_CMD_SZ;
620  buf_tmp_dma += MVS_SSP_CMD_SZ;
621 
622  /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
623  buf_oaf = buf_tmp;
624  hdr->open_frame = cpu_to_le64(buf_tmp_dma);
625 
626  buf_tmp += MVS_OAF_SZ;
627  buf_tmp_dma += MVS_OAF_SZ;
628 
629  /* region 3: PRD table ********************************************* */
630  buf_prd = buf_tmp;
631  if (tei->n_elem)
632  hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
633  else
634  hdr->prd_tbl = 0;
635 
636  i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
637  buf_tmp += i;
638  buf_tmp_dma += i;
639 
640  /* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
641  slot->response = buf_tmp;
642  hdr->status_buf = cpu_to_le64(buf_tmp_dma);
643  if (mvi->flags & MVF_FLAG_SOC)
644  hdr->reserved[0] = 0;
645 
646  resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
647  sizeof(struct mvs_err_info) - i;
648  resp_len = min(resp_len, max_resp_len);
649 
650  req_len = sizeof(struct ssp_frame_hdr) + 28;
651 
652  /* request, response lengths */
653  hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
654 
655  /* generate open address frame hdr (first 12 bytes) */
656  /* initiator, SSP, ftype 1h */
657  buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
658  buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
659  *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
660  memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
661 
662  /* fill in SSP frame header (Command Table.SSP frame header) */
663  ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
664 
665  if (is_tmf)
666  ssp_hdr->frame_type = SSP_TASK;
667  else
668  ssp_hdr->frame_type = SSP_COMMAND;
669 
670  memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
671  HASHED_SAS_ADDR_SIZE);
672  memcpy(ssp_hdr->hashed_src_addr,
673  dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
674  ssp_hdr->tag = cpu_to_be16(tag);
675 
676  /* fill in IU for TASK and Command Frame */
677  buf_cmd += sizeof(*ssp_hdr);
678  memcpy(buf_cmd, &task->ssp_task.LUN, 8);
679 
680  if (ssp_hdr->frame_type != SSP_TASK) {
681  buf_cmd[9] = fburst | task->ssp_task.task_attr |
682  (task->ssp_task.task_prio << 3);
683  memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
684  } else{
685  buf_cmd[10] = tmf->tmf;
686  switch (tmf->tmf) {
687  case TMF_ABORT_TASK:
688  case TMF_QUERY_TASK:
689  buf_cmd[12] =
690  (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
691  buf_cmd[13] =
692  tmf->tag_of_task_to_be_managed & 0xff;
693  break;
694  default:
695  break;
696  }
697  }
698  /* fill in PRD (scatter/gather) table, if any */
699  MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
700  return 0;
701 }
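
Annotation: all three prep paths build the open address frame the same way: byte 0 packs initiator mode (bit 7), the protocol (bits 6:4) and the frame type (1h, OPEN) in bits 3:0; byte 1 carries the negotiated link rate. A sketch with stand-in values:

#include <stdio.h>

#define DEMO_PROTOCOL_SSP 1     /* stand-in for PROTOCOL_SSP */

int main(void)
{
	unsigned char linkrate = 9;   /* hypothetical link-rate code */
	unsigned char oaf0 = (1 << 7) | (DEMO_PROTOCOL_SSP << 4) | 0x1;
	unsigned char oaf1 = linkrate & 0xf;

	printf("OAF[0]=0x%02x OAF[1]=0x%02x\n", oaf0, oaf1);
	return 0;
}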
702 
703 #define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
704 static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
705  struct mvs_tmf_task *tmf, int *pass)
706 {
707  struct domain_device *dev = task->dev;
708  struct mvs_device *mvi_dev = dev->lldd_dev;
709  struct mvs_task_exec_info tei;
710  struct mvs_slot_info *slot;
711  u32 tag = 0xdeadbeef, n_elem = 0;
712  int rc = 0;
713 
714  if (!dev->port) {
715  struct task_status_struct *tsm = &task->task_status;
716 
717  tsm->resp = SAS_TASK_UNDELIVERED;
718  tsm->stat = SAS_PHY_DOWN;
719  /*
720  * libsas will use dev->port, should
721  * not call task_done for sata
722  */
723  if (dev->dev_type != SATA_DEV)
724  task->task_done(task);
725  return rc;
726  }
727 
728  if (DEV_IS_GONE(mvi_dev)) {
729  if (mvi_dev)
730  mv_dprintk("device %d not ready.\n",
731  mvi_dev->device_id);
732  else
733  mv_dprintk("device %016llx not ready.\n",
734  SAS_ADDR(dev->sas_addr));
735 
736  rc = SAS_PHY_DOWN;
737  return rc;
738  }
739  tei.port = dev->port->lldd_port;
740  if (tei.port && !tei.port->port_attached && !tmf) {
741  if (sas_protocol_ata(task->task_proto)) {
742  struct task_status_struct *ts = &task->task_status;
743  mv_dprintk("SATA/STP port %d does not attach "
744  "device.\n", dev->port->id);
745  ts->resp = SAS_TASK_COMPLETE;
746  ts->stat = SAS_PHY_DOWN;
747 
748  task->task_done(task);
749 
750  } else {
751  struct task_status_struct *ts = &task->task_status;
752  mv_dprintk("SAS port %d does not attach "
753  "device.\n", dev->port->id);
754  ts->resp = SAS_TASK_UNDELIVERED;
755  ts->stat = SAS_PHY_DOWN;
756  task->task_done(task);
757  }
758  return rc;
759  }
760 
761  if (!sas_protocol_ata(task->task_proto)) {
762  if (task->num_scatter) {
763  n_elem = dma_map_sg(mvi->dev,
764  task->scatter,
765  task->num_scatter,
766  task->data_dir);
767  if (!n_elem) {
768  rc = -ENOMEM;
769  goto prep_out;
770  }
771  }
772  } else {
773  n_elem = task->num_scatter;
774  }
775 
776  rc = mvs_tag_alloc(mvi, &tag);
777  if (rc)
778  goto err_out;
779 
780  slot = &mvi->slot_info[tag];
781 
782  task->lldd_task = NULL;
783  slot->n_elem = n_elem;
784  slot->slot_tag = tag;
785 
786  slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
787  if (!slot->buf)
788  goto err_out_tag;
789  memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
790 
791  tei.task = task;
792  tei.hdr = &mvi->slot[tag];
793  tei.tag = tag;
794  tei.n_elem = n_elem;
795  switch (task->task_proto) {
796  case SAS_PROTOCOL_SMP:
797  rc = mvs_task_prep_smp(mvi, &tei);
798  break;
799  case SAS_PROTOCOL_SSP:
800  rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
801  break;
802  case SAS_PROTOCOL_SATA:
803  case SAS_PROTOCOL_STP:
804  case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
805  rc = mvs_task_prep_ata(mvi, &tei);
806  break;
807  default:
808  dev_printk(KERN_ERR, mvi->dev,
809  "unknown sas_task proto: 0x%x\n",
810  task->task_proto);
811  rc = -EINVAL;
812  break;
813  }
814 
815  if (rc) {
816  mv_dprintk("rc is %x\n", rc);
817  goto err_out_slot_buf;
818  }
819  slot->task = task;
820  slot->port = tei.port;
821  task->lldd_task = slot;
822  list_add_tail(&slot->entry, &tei.port->list);
823  spin_lock(&task->task_state_lock);
824  task->task_state_flags |= SAS_TASK_AT_INITIATOR;
825  spin_unlock(&task->task_state_lock);
826 
827  mvi_dev->running_req++;
828  ++(*pass);
829  mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
830 
831  return rc;
832 
833 err_out_slot_buf:
834  pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
835 err_out_tag:
836  mvs_tag_free(mvi, tag);
837 err_out:
838 
839  dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
840  if (!sas_protocol_ata(task->task_proto))
841  if (n_elem)
842  dma_unmap_sg(mvi->dev, task->scatter, n_elem,
843  task->data_dir);
844 prep_out:
845  return rc;
846 }
847 
848 static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags)
849 {
850  struct mvs_task_list *first = NULL;
851 
852  for (; *num > 0; --*num) {
853  struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);
854 
855  if (!mvs_list)
856  break;
857 
858  INIT_LIST_HEAD(&mvs_list->list);
859  if (!first)
860  first = mvs_list;
861  else
862  list_add_tail(&mvs_list->list, &first->list);
863 
864  }
865 
866  return first;
867 }
868 
869 static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
870 {
871  LIST_HEAD(list);
872  struct list_head *pos, *a;
873  struct mvs_task_list *mlist = NULL;
874 
875  __list_add(&list, mvs_list->list.prev, &mvs_list->list);
876 
877  list_for_each_safe(pos, a, &list) {
878  list_del_init(pos);
879  mlist = list_entry(pos, struct mvs_task_list, list);
880  kmem_cache_free(mvs_task_list_cache, mlist);
881  }
882 }
883 
884 static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
885  struct completion *completion, int is_tmf,
886  struct mvs_tmf_task *tmf)
887 {
888  struct mvs_info *mvi = NULL;
889  u32 rc = 0;
890  u32 pass = 0;
891  unsigned long flags = 0;
892 
893  mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
894 
895  spin_lock_irqsave(&mvi->lock, flags);
896  rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
897  if (rc)
898  dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
899 
900  if (likely(pass))
901  MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
902  (MVS_CHIP_SLOT_SZ - 1));
903  spin_unlock_irqrestore(&mvi->lock, flags);
904 
905  return rc;
906 }
907 
908 static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
909  struct completion *completion, int is_tmf,
910  struct mvs_tmf_task *tmf)
911 {
912  struct domain_device *dev = task->dev;
913  struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
914  struct mvs_info *mvi = NULL;
915  struct sas_task *t = task;
916  struct mvs_task_list *mvs_list = NULL, *a;
917  LIST_HEAD(q);
918  int pass[2] = {0};
919  u32 rc = 0;
920  u32 n = num;
921  unsigned long flags = 0;
922 
923  mvs_list = mvs_task_alloc_list(&n, gfp_flags);
924  if (n) {
925  printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
926  rc = -ENOMEM;
927  goto free_list;
928  }
929 
930  __list_add(&q, mvs_list->list.prev, &mvs_list->list);
931 
932  list_for_each_entry(a, &q, list) {
933  a->task = t;
934  t = list_entry(t->list.next, struct sas_task, list);
935  }
936 
937  list_for_each_entry(a, &q , list) {
938 
939  t = a->task;
940  mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;
941 
942  spin_lock_irqsave(&mvi->lock, flags);
943  rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
944  if (rc)
945  dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
946  spin_unlock_irqrestore(&mvi->lock, flags);
947  }
948 
949  if (likely(pass[0]))
950  MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
951  (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
952 
953  if (likely(pass[1]))
954  MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
955  (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
956 
957  list_del_init(&q);
958 
959 free_list:
960  if (mvs_list)
961  mvs_task_free_list(mvs_list);
962 
963  return rc;
964 }
965 
966 int mvs_queue_command(struct sas_task *task, const int num,
967  gfp_t gfp_flags)
968 {
969  struct mvs_device *mvi_dev = task->dev->lldd_dev;
970  struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;
971 
972  if (sas->lldd_max_execute_num < 2)
973  return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
974  else
975  return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
976 }
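
Annotation: the dispatch rule here comes from libsas' lldd_max_execute_num: below two, each task goes straight to mvs_task_exec(); otherwise the collector path batches a linked task list and kicks both controllers. A trivial sketch of the decision (value assumed):

#include <stdio.h>

int main(void)
{
	int lldd_max_execute_num = 1;   /* hypothetical libsas setting */

	if (lldd_max_execute_num < 2)
		printf("direct path: mvs_task_exec()\n");
	else
		printf("batch path: mvs_collector_task_exec()\n");
	return 0;
}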
977 
978 static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
979 {
980  u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
981  mvs_tag_clear(mvi, slot_idx);
982 }
983 
984 static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
985  struct mvs_slot_info *slot, u32 slot_idx)
986 {
987  if (!slot->task)
988  return;
989  if (!sas_protocol_ata(task->task_proto))
990  if (slot->n_elem)
991  dma_unmap_sg(mvi->dev, task->scatter,
992  slot->n_elem, task->data_dir);
993 
994  switch (task->task_proto) {
995  case SAS_PROTOCOL_SMP:
996  dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
997  PCI_DMA_FROMDEVICE);
998  dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
999  PCI_DMA_TODEVICE);
1000  break;
1001 
1002  case SAS_PROTOCOL_SATA:
1003  case SAS_PROTOCOL_STP:
1004  case SAS_PROTOCOL_SSP:
1005  default:
1006  /* do nothing */
1007  break;
1008  }
1009 
1010  if (slot->buf) {
1011  pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
1012  slot->buf = NULL;
1013  }
1014  list_del_init(&slot->entry);
1015  task->lldd_task = NULL;
1016  slot->task = NULL;
1017  slot->port = NULL;
1018  slot->slot_tag = 0xFFFFFFFF;
1019  mvs_slot_free(mvi, slot_idx);
1020 }
1021 
1022 static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
1023 {
1024  struct mvs_phy *phy = &mvi->phy[phy_no];
1025  struct mvs_port *port = phy->port;
1026  int j, no;
1027 
1028  for_each_phy(port->wide_port_phymap, j, no) {
1029  if (j & 1) {
1030  MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1031  PHYR_WIDE_PORT);
1032  MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1033  port->wide_port_phymap);
1034  } else {
1035  MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1036  PHYR_WIDE_PORT);
1037  MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1038  0);
1039  }
1040  }
1041 }
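
Annotation: for_each_phy() used above walks the wide-port bitmap one bit at a time, programming PHYR_WIDE_PORT on member phys and clearing it on the rest. A user-space sketch of that walk (the phymap value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int phymap = 0x5;      /* hypothetical: phys 0 and 2 */
	unsigned int j;
	int no;

	for (j = phymap, no = 0; j; j >>= 1, no++) {
		if (j & 1)
			printf("phy %d: write wide-port map 0x%x\n", no, phymap);
		else
			printf("phy %d: clear wide-port map\n", no);
	}
	return 0;
}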
1042 
1043 static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
1044 {
1045  u32 tmp;
1046  struct mvs_phy *phy = &mvi->phy[i];
1047  struct mvs_port *port = phy->port;
1048 
1049  tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
1050  if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
1051  if (!port)
1052  phy->phy_attached = 1;
1053  return tmp;
1054  }
1055 
1056  if (port) {
1057  if (phy->phy_type & PORT_TYPE_SAS) {
1058  port->wide_port_phymap &= ~(1U << i);
1059  if (!port->wide_port_phymap)
1060  port->port_attached = 0;
1061  mvs_update_wideport(mvi, i);
1062  } else if (phy->phy_type & PORT_TYPE_SATA)
1063  port->port_attached = 0;
1064  phy->port = NULL;
1065  phy->phy_attached = 0;
1066  phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
1067  }
1068  return 0;
1069 }
1070 
1071 static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
1072 {
1073  u32 *s = (u32 *) buf;
1074 
1075  if (!s)
1076  return NULL;
1077 
1078  MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
1079  s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1080 
1081  MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
1082  s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1083 
1084  MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
1085  s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1086 
1087  MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
1088  s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1089 
1090  if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1091  s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
1092 
1093  return s;
1094 }
1095 
1096 static u32 mvs_is_sig_fis_received(u32 irq_status)
1097 {
1098  return irq_status & PHYEV_SIG_FIS;
1099 }
1100 
1101 static void mvs_sig_remove_timer(struct mvs_phy *phy)
1102 {
1103  if (phy->timer.function)
1104  del_timer(&phy->timer);
1105  phy->timer.function = NULL;
1106 }
1107 
1108 void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1109 {
1110  struct mvs_phy *phy = &mvi->phy[i];
1111  struct sas_identify_frame *id;
1112 
1113  id = (struct sas_identify_frame *)phy->frame_rcvd;
1114 
1115  if (get_st) {
1116  phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
1117  phy->phy_status = mvs_is_phy_ready(mvi, i);
1118  }
1119 
1120  if (phy->phy_status) {
1121  int oob_done = 0;
1122  struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
1123 
1124  oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
1125 
1126  MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
1127  if (phy->phy_type & PORT_TYPE_SATA) {
1128  phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1129  if (mvs_is_sig_fis_received(phy->irq_status)) {
1130  mvs_sig_remove_timer(phy);
1131  phy->phy_attached = 1;
1132  phy->att_dev_sas_addr =
1133  i + mvi->id * mvi->chip->n_phy;
1134  if (oob_done)
1135  sas_phy->oob_mode = SATA_OOB_MODE;
1136  phy->frame_rcvd_size =
1137  sizeof(struct dev_to_host_fis);
1138  mvs_get_d2h_reg(mvi, i, id);
1139  } else {
1140  u32 tmp;
1141  dev_printk(KERN_DEBUG, mvi->dev,
1142  "Phy%d : No sig fis\n", i);
1143  tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
1144  MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
1145  tmp | PHYEV_SIG_FIS);
1146  phy->phy_attached = 0;
1147  phy->phy_type &= ~PORT_TYPE_SATA;
1148  goto out_done;
1149  }
1150  } else if (phy->phy_type & PORT_TYPE_SAS
1151  || phy->att_dev_info & PORT_SSP_INIT_MASK) {
1152  phy->phy_attached = 1;
1153  phy->identify.device_type =
1154  phy->att_dev_info & PORT_DEV_TYPE_MASK;
1155 
1156  if (phy->identify.device_type == SAS_END_DEV)
1157  phy->identify.target_port_protocols =
1158  SAS_PROTOCOL_SSP;
1159  else if (phy->identify.device_type != NO_DEVICE)
1160  phy->identify.target_port_protocols =
1161  SAS_PROTOCOL_SMP;
1162  if (oob_done)
1163  sas_phy->oob_mode = SAS_OOB_MODE;
1164  phy->frame_rcvd_size =
1165  sizeof(struct sas_identify_frame);
1166  }
1167  memcpy(sas_phy->attached_sas_addr,
1168  &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
1169 
1170  if (MVS_CHIP_DISP->phy_work_around)
1171  MVS_CHIP_DISP->phy_work_around(mvi, i);
1172  }
1173  mv_dprintk("phy %d attach dev info is %x\n",
1174  i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1175  mv_dprintk("phy %d attach sas addr is %llx\n",
1176  i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1177 out_done:
1178  if (get_st)
1179  MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
1180 }
1181 
1182 static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1183 {
1184  struct sas_ha_struct *sas_ha = sas_phy->ha;
1185  struct mvs_info *mvi = NULL; int i = 0, hi;
1186  struct mvs_phy *phy = sas_phy->lldd_phy;
1187  struct asd_sas_port *sas_port = sas_phy->port;
1188  struct mvs_port *port;
1189  unsigned long flags = 0;
1190  if (!sas_port)
1191  return;
1192 
1193  while (sas_ha->sas_phy[i]) {
1194  if (sas_ha->sas_phy[i] == sas_phy)
1195  break;
1196  i++;
1197  }
1198  hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1199  mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1200  if (i >= mvi->chip->n_phy)
1201  port = &mvi->port[i - mvi->chip->n_phy];
1202  else
1203  port = &mvi->port[i];
1204  if (lock)
1205  spin_lock_irqsave(&mvi->lock, flags);
1206  port->port_attached = 1;
1207  phy->port = port;
1208  sas_port->lldd_port = port;
1209  if (phy->phy_type & PORT_TYPE_SAS) {
1210  port->wide_port_phymap = sas_port->phy_mask;
1211  mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
1212  mvs_update_wideport(mvi, sas_phy->id);
1213 
1214  /* direct attached SAS device */
1215  if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
1216  MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
1217  MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04);
1218  }
1219  }
1220  if (lock)
1221  spin_unlock_irqrestore(&mvi->lock, flags);
1222 }
1223 
1224 static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1225 {
1226  struct domain_device *dev;
1227  struct mvs_phy *phy = sas_phy->lldd_phy;
1228  struct mvs_info *mvi = phy->mvi;
1229  struct asd_sas_port *port = sas_phy->port;
1230  int phy_no = 0;
1231 
1232  while (phy != &mvi->phy[phy_no]) {
1233  phy_no++;
1234  if (phy_no >= MVS_MAX_PHYS)
1235  return;
1236  }
1237  list_for_each_entry(dev, &port->dev_list, dev_list_node)
1238  mvs_do_release_task(phy->mvi, phy_no, dev);
1239 
1240 }
1241 
1242 
1243 void mvs_port_formed(struct asd_sas_phy *sas_phy)
1244 {
1245  mvs_port_notify_formed(sas_phy, 1);
1246 }
1247 
1248 void mvs_port_deformed(struct asd_sas_phy *sas_phy)
1249 {
1250  mvs_port_notify_deformed(sas_phy, 1);
1251 }
1252 
1253 struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1254 {
1255  u32 dev;
1256  for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1257  if (mvi->devices[dev].dev_type == NO_DEVICE) {
1258  mvi->devices[dev].device_id = dev;
1259  return &mvi->devices[dev];
1260  }
1261  }
1262 
1263  if (dev == MVS_MAX_DEVICES)
1264  mv_printk("supports at most %d devices, ignoring.\n",
1265  MVS_MAX_DEVICES);
1266 
1267  return NULL;
1268 }
1269 
1270 void mvs_free_dev(struct mvs_device *mvi_dev)
1271 {
1272  u32 id = mvi_dev->device_id;
1273  memset(mvi_dev, 0, sizeof(*mvi_dev));
1274  mvi_dev->device_id = id;
1275  mvi_dev->dev_type = NO_DEVICE;
1276  mvi_dev->dev_status = MVS_DEV_NORMAL;
1277  mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1278 }
1279 
1280 int mvs_dev_found_notify(struct domain_device *dev, int lock)
1281 {
1282  unsigned long flags = 0;
1283  int res = 0;
1284  struct mvs_info *mvi = NULL;
1285  struct domain_device *parent_dev = dev->parent;
1286  struct mvs_device *mvi_device;
1287 
1288  mvi = mvs_find_dev_mvi(dev);
1289 
1290  if (lock)
1291  spin_lock_irqsave(&mvi->lock, flags);
1292 
1293  mvi_device = mvs_alloc_dev(mvi);
1294  if (!mvi_device) {
1295  res = -1;
1296  goto found_out;
1297  }
1298  dev->lldd_dev = mvi_device;
1299  mvi_device->dev_status = MVS_DEV_NORMAL;
1300  mvi_device->dev_type = dev->dev_type;
1301  mvi_device->mvi_info = mvi;
1302  mvi_device->sas_device = dev;
1303  if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1304  int phy_id;
1305  u8 phy_num = parent_dev->ex_dev.num_phys;
1306  struct ex_phy *phy;
1307  for (phy_id = 0; phy_id < phy_num; phy_id++) {
1308  phy = &parent_dev->ex_dev.ex_phy[phy_id];
1309  if (SAS_ADDR(phy->attached_sas_addr) ==
1310  SAS_ADDR(dev->sas_addr)) {
1311  mvi_device->attached_phy = phy_id;
1312  break;
1313  }
1314  }
1315 
1316  if (phy_id == phy_num) {
1317  mv_printk("Error: no attached dev:%016llx"
1318  "at ex:%016llx.\n",
1319  SAS_ADDR(dev->sas_addr),
1320  SAS_ADDR(parent_dev->sas_addr));
1321  res = -1;
1322  }
1323  }
1324 
1325 found_out:
1326  if (lock)
1327  spin_unlock_irqrestore(&mvi->lock, flags);
1328  return res;
1329 }
1330 
1331 int mvs_dev_found(struct domain_device *dev)
1332 {
1333  return mvs_dev_found_notify(dev, 1);
1334 }
1335 
1336 static void mvs_dev_gone_notify(struct domain_device *dev)
1337 {
1338  unsigned long flags = 0;
1339  struct mvs_device *mvi_dev = dev->lldd_dev;
1340  struct mvs_info *mvi = mvi_dev->mvi_info;
1341 
1342  spin_lock_irqsave(&mvi->lock, flags);
1343 
1344  if (mvi_dev) {
1345  mv_dprintk("found dev[%d:%x] is gone.\n",
1346  mvi_dev->device_id, mvi_dev->dev_type);
1347  mvs_release_task(mvi, dev);
1348  mvs_free_reg_set(mvi, mvi_dev);
1349  mvi_dev->sas_device = NULL;
1350  mvs_free_dev(mvi_dev);
1351  } else {
1352  mv_dprintk("found dev has already gone.\n");
1353  }
1354  dev->lldd_dev = NULL;
1355 
1356  spin_unlock_irqrestore(&mvi->lock, flags);
1357 }
1358 
1359 
1360 void mvs_dev_gone(struct domain_device *dev)
1361 {
1362  mvs_dev_gone_notify(dev);
1363 }
1364 
1365 static void mvs_task_done(struct sas_task *task)
1366 {
1367  if (!del_timer(&task->slow_task->timer))
1368  return;
1369  complete(&task->slow_task->completion);
1370 }
1371 
1372 static void mvs_tmf_timedout(unsigned long data)
1373 {
1374  struct sas_task *task = (struct sas_task *)data;
1375 
1376  task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1377  complete(&task->slow_task->completion);
1378 }
1379 
1380 #define MVS_TASK_TIMEOUT 20
1381 static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1382  void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
1383 {
1384  int res, retry;
1385  struct sas_task *task = NULL;
1386 
1387  for (retry = 0; retry < 3; retry++) {
1388  task = sas_alloc_slow_task(GFP_KERNEL);
1389  if (!task)
1390  return -ENOMEM;
1391 
1392  task->dev = dev;
1393  task->task_proto = dev->tproto;
1394 
1395  memcpy(&task->ssp_task, parameter, para_len);
1396  task->task_done = mvs_task_done;
1397 
1398  task->slow_task->timer.data = (unsigned long) task;
1399  task->slow_task->timer.function = mvs_tmf_timedout;
1400  task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1401  add_timer(&task->slow_task->timer);
1402 
1403  res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
1404 
1405  if (res) {
1406  del_timer(&task->slow_task->timer);
1407  mv_printk("executing internal task failed: %d\n", res);
1408  goto ex_err;
1409  }
1410 
1411  wait_for_completion(&task->slow_task->completion);
1412  res = TMF_RESP_FUNC_FAILED;
1413  /* Even TMF timed out, return direct. */
1414  if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1415  if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1416  mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
1417  goto ex_err;
1418  }
1419  }
1420 
1421  if (task->task_status.resp == SAS_TASK_COMPLETE &&
1422  task->task_status.stat == SAM_STAT_GOOD) {
1423  res = TMF_RESP_FUNC_COMPLETE;
1424  break;
1425  }
1426 
1427  if (task->task_status.resp == SAS_TASK_COMPLETE &&
1428  task->task_status.stat == SAS_DATA_UNDERRUN) {
1429  /* no error, but return the number of bytes of
1430  * underrun */
1431  res = task->task_status.residual;
1432  break;
1433  }
1434 
1435  if (task->task_status.resp == SAS_TASK_COMPLETE &&
1436  task->task_status.stat == SAS_DATA_OVERRUN) {
1437  mv_dprintk("blocked task error.\n");
1438  res = -EMSGSIZE;
1439  break;
1440  } else {
1441  mv_dprintk(" task to dev %016llx response: 0x%x "
1442  "status 0x%x\n",
1443  SAS_ADDR(dev->sas_addr),
1444  task->task_status.resp,
1445  task->task_status.stat);
1446  sas_free_task(task);
1447  task = NULL;
1448 
1449  }
1450  }
1451 ex_err:
1452  BUG_ON(retry == 3 && task != NULL);
1453  sas_free_task(task);
1454  return res;
1455 }
1456 
1457 static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1458  u8 *lun, struct mvs_tmf_task *tmf)
1459 {
1460  struct sas_ssp_task ssp_task;
1461  if (!(dev->tproto & SAS_PROTOCOL_SSP))
1462  return TMF_RESP_FUNC_ESUPP;
1463 
1464  memcpy(ssp_task.LUN, lun, 8);
1465 
1466  return mvs_exec_internal_tmf_task(dev, &ssp_task,
1467  sizeof(ssp_task), tmf);
1468 }
1469 
1470 
1471 /* Standard mandates link reset for ATA (type 0)
1472  and hard reset for SSP (type 1), only for RECOVERY */
1473 static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1474 {
1475  int rc;
1476  struct sas_phy *phy = sas_get_local_phy(dev);
1477  int reset_type = (dev->dev_type == SATA_DEV ||
1478  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1479  rc = sas_phy_reset(phy, reset_type);
1480  sas_put_local_phy(phy);
1481  msleep(2000);
1482  return rc;
1483 }
1484 
1485 /* mandatory SAM-3 */
1486 int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1487 {
1488  unsigned long flags;
1489  int rc = TMF_RESP_FUNC_FAILED;
1490  struct mvs_tmf_task tmf_task;
1491  struct mvs_device * mvi_dev = dev->lldd_dev;
1492  struct mvs_info *mvi = mvi_dev->mvi_info;
1493 
1494  tmf_task.tmf = TMF_LU_RESET;
1495  mvi_dev->dev_status = MVS_DEV_EH;
1496  rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1497  if (rc == TMF_RESP_FUNC_COMPLETE) {
1498  spin_lock_irqsave(&mvi->lock, flags);
1499  mvs_release_task(mvi, dev);
1500  spin_unlock_irqrestore(&mvi->lock, flags);
1501  }
1502  /* If failed, fall-through I_T_Nexus reset */
1503  mv_printk("%s for device[%x]:rc= %d\n", __func__,
1504  mvi_dev->device_id, rc);
1505  return rc;
1506 }
1507 
1508 int mvs_I_T_nexus_reset(struct domain_device *dev)
1509 {
1510  unsigned long flags;
1511  int rc = TMF_RESP_FUNC_FAILED;
1512  struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
1513  struct mvs_info *mvi = mvi_dev->mvi_info;
1514 
1515  if (mvi_dev->dev_status != MVS_DEV_EH)
1516  return TMF_RESP_FUNC_COMPLETE;
1517  else
1518  mvi_dev->dev_status = MVS_DEV_NORMAL;
1519  rc = mvs_debug_I_T_nexus_reset(dev);
1520  mv_printk("%s for device[%x]:rc= %d\n",
1521  __func__, mvi_dev->device_id, rc);
1522 
1523  spin_lock_irqsave(&mvi->lock, flags);
1524  mvs_release_task(mvi, dev);
1525  spin_unlock_irqrestore(&mvi->lock, flags);
1526 
1527  return rc;
1528 }
1529 /* optional SAM-3 */
1530 int mvs_query_task(struct sas_task *task)
1531 {
1532  u32 tag;
1533  struct scsi_lun lun;
1534  struct mvs_tmf_task tmf_task;
1535  int rc = TMF_RESP_FUNC_FAILED;
1536 
1537  if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1538  struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1539  struct domain_device *dev = task->dev;
1540  struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1541  struct mvs_info *mvi = mvi_dev->mvi_info;
1542 
1543  int_to_scsilun(cmnd->device->lun, &lun);
1544  rc = mvs_find_tag(mvi, task, &tag);
1545  if (rc == 0) {
1546  rc = TMF_RESP_FUNC_FAILED;
1547  return rc;
1548  }
1549 
1550  tmf_task.tmf = TMF_QUERY_TASK;
1551  tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1552 
1553  rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1554  switch (rc) {
1555  /* The task is still in LUN, release it then */
1556  case TMF_RESP_FUNC_SUCC:
1557  /* The task is not in LUN or failed, reset the phy */
1558  case TMF_RESP_FUNC_FAILED:
1559  case TMF_RESP_FUNC_COMPLETE:
1560  break;
1561  }
1562  }
1563  mv_printk("%s:rc= %d\n", __func__, rc);
1564  return rc;
1565 }
1566 
1567 /* mandatory SAM-3, still need free task/slot info */
1568 int mvs_abort_task(struct sas_task *task)
1569 {
1570  struct scsi_lun lun;
1571  struct mvs_tmf_task tmf_task;
1572  struct domain_device *dev = task->dev;
1573  struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1574  struct mvs_info *mvi;
1575  int rc = TMF_RESP_FUNC_FAILED;
1576  unsigned long flags;
1577  u32 tag;
1578 
1579  if (!mvi_dev) {
1580  mv_printk("Device has been removed\n");
1581  return TMF_RESP_FUNC_FAILED;
1582  }
1583 
1584  mvi = mvi_dev->mvi_info;
1585 
1586  spin_lock_irqsave(&task->task_state_lock, flags);
1587  if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1588  spin_unlock_irqrestore(&task->task_state_lock, flags);
1589  rc = TMF_RESP_FUNC_COMPLETE;
1590  goto out;
1591  }
1592  spin_unlock_irqrestore(&task->task_state_lock, flags);
1593  mvi_dev->dev_status = MVS_DEV_EH;
1594  if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1595  struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1596 
1597  int_to_scsilun(cmnd->device->lun, &lun);
1598  rc = mvs_find_tag(mvi, task, &tag);
1599  if (rc == 0) {
1600  mv_printk("No such tag in %s\n", __func__);
1601  rc = TMF_RESP_FUNC_FAILED;
1602  return rc;
1603  }
1604 
1605  tmf_task.tmf = TMF_ABORT_TASK;
1606  tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1607 
1608  rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1609 
1610  /* if successful, clear the task and callback forwards.*/
1611  if (rc == TMF_RESP_FUNC_COMPLETE) {
1612  u32 slot_no;
1613  struct mvs_slot_info *slot;
1614 
1615  if (task->lldd_task) {
1616  slot = task->lldd_task;
1617  slot_no = (u32) (slot - mvi->slot_info);
1618  spin_lock_irqsave(&mvi->lock, flags);
1619  mvs_slot_complete(mvi, slot_no, 1);
1620  spin_unlock_irqrestore(&mvi->lock, flags);
1621  }
1622  }
1623 
1624  } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1625  task->task_proto & SAS_PROTOCOL_STP) {
1626  if (SATA_DEV == dev->dev_type) {
1627  struct mvs_slot_info *slot = task->lldd_task;
1628  u32 slot_idx = (u32)(slot - mvi->slot_info);
1629  mv_dprintk("mvs_abort_task() mvi=%p task=%p "
1630  "slot=%p slot_idx=x%x\n",
1631  mvi, task, slot, slot_idx);
1632  task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1633  mvs_slot_task_free(mvi, task, slot, slot_idx);
1634  rc = TMF_RESP_FUNC_COMPLETE;
1635  goto out;
1636  }
1637 
1638  }
1639 out:
1640  if (rc != TMF_RESP_FUNC_COMPLETE)
1641  mv_printk("%s:rc= %d\n", __func__, rc);
1642  return rc;
1643 }
1644 
1645 int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
1646 {
1647  int rc = TMF_RESP_FUNC_FAILED;
1648  struct mvs_tmf_task tmf_task;
1649 
1650  tmf_task.tmf = TMF_ABORT_TASK_SET;
1651  rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1652 
1653  return rc;
1654 }
1655 
1656 int mvs_clear_aca(struct domain_device *dev, u8 *lun)
1657 {
1658  int rc = TMF_RESP_FUNC_FAILED;
1659  struct mvs_tmf_task tmf_task;
1660 
1661  tmf_task.tmf = TMF_CLEAR_ACA;
1662  rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1663 
1664  return rc;
1665 }
1666 
1667 int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
1668 {
1669  int rc = TMF_RESP_FUNC_FAILED;
1670  struct mvs_tmf_task tmf_task;
1671 
1672  tmf_task.tmf = TMF_CLEAR_TASK_SET;
1673  rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1674 
1675  return rc;
1676 }
1677 
1678 static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1679  u32 slot_idx, int err)
1680 {
1681  struct mvs_device *mvi_dev = task->dev->lldd_dev;
1682  struct task_status_struct *tstat = &task->task_status;
1683  struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1684  int stat = SAM_STAT_GOOD;
1685 
1686 
1687  resp->frame_len = sizeof(struct dev_to_host_fis);
1688  memcpy(&resp->ending_fis[0],
1689  SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
1690  sizeof(struct dev_to_host_fis));
1691  tstat->buf_valid_size = sizeof(*resp);
1692  if (unlikely(err)) {
1693  if (unlikely(err & CMD_ISS_STPD))
1694  stat = SAS_OPEN_REJECT;
1695  else
1696  stat = SAS_PROTO_RESPONSE;
1697  }
1698 
1699  return stat;
1700 }
1701 
1702 void mvs_set_sense(u8 *buffer, int len, int d_sense,
1703  int key, int asc, int ascq)
1704 {
1705  memset(buffer, 0, len);
1706 
1707  if (d_sense) {
1708  /* Descriptor format */
1709  if (len < 4) {
1710  mv_printk("Length %d of sense buffer too small to "
1711  "fit sense %x:%x:%x", len, key, asc, ascq);
1712  }
1713 
1714  buffer[0] = 0x72; /* Response Code */
1715  if (len > 1)
1716  buffer[1] = key; /* Sense Key */
1717  if (len > 2)
1718  buffer[2] = asc; /* ASC */
1719  if (len > 3)
1720  buffer[3] = ascq; /* ASCQ */
1721  } else {
1722  if (len < 14) {
1723  mv_printk("Length %d of sense buffer too small to "
1724  "fit sense %x:%x:%x", len, key, asc, ascq);
1725  }
1726 
1727  buffer[0] = 0x70; /* Response Code */
1728  if (len > 2)
1729  buffer[2] = key; /* Sense Key */
1730  if (len > 7)
1731  buffer[7] = 0x0a; /* Additional Sense Length */
1732  if (len > 12)
1733  buffer[12] = asc; /* ASC */
1734  if (len > 13)
1735  buffer[13] = ascq; /* ASCQ */
1736  }
1737 
1738  return;
1739 }
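
Annotation: for the fixed-format branch above, the layout is: response code 0x70 at byte 0, sense key at byte 2, additional sense length at byte 7, ASC/ASCQ at bytes 12/13. A standalone sketch filling the NOT_READY/0x04/0x01 combination that mvs_slot_err() further down passes to mvs_fill_ssp_resp_iu():

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char sense[17];

	memset(sense, 0, sizeof(sense));
	sense[0]  = 0x70;   /* fixed-format response code */
	sense[2]  = 0x02;   /* sense key: NOT READY */
	sense[7]  = 0x0a;   /* additional sense length */
	sense[12] = 0x04;   /* ASC */
	sense[13] = 0x01;   /* ASCQ */

	printf("key=%x asc=%x ascq=%x\n", sense[2], sense[12], sense[13]);
	return 0;
}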
1740 
1741 void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
1742  u8 key, u8 asc, u8 asc_q)
1743 {
1744  iu->datapres = 2;
1745  iu->response_data_len = 0;
1746  iu->sense_data_len = 17;
1747  iu->status = 02;
1748  mvs_set_sense(iu->sense_data, 17, 0,
1749  key, asc, asc_q);
1750 }
1751 
1752 static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1753  u32 slot_idx)
1754 {
1755  struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1756  int stat;
1757  u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
1758  u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
1759  u32 tfs = 0;
1760  enum mvs_port_type type = PORT_TYPE_SAS;
1761 
1762  if (err_dw0 & CMD_ISS_STPD)
1763  MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
1764 
1765  MVS_CHIP_DISP->command_active(mvi, slot_idx);
1766 
1767  stat = SAM_STAT_CHECK_CONDITION;
1768  switch (task->task_proto) {
1769  case SAS_PROTOCOL_SSP:
1770  {
1771  stat = SAS_ABORTED_TASK;
1772  if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
1773  struct ssp_response_iu *iu = slot->response +
1774  sizeof(struct mvs_err_info);
1775  mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
1776  sas_ssp_task_response(mvi->dev, task, iu);
1777  stat = SAM_STAT_CHECK_CONDITION;
1778  }
1779  if (err_dw1 & bit(31))
1780  mv_printk("reuse same slot, retry command.\n");
1781  break;
1782  }
1783  case SAS_PROTOCOL_SMP:
1784  stat = SAM_STAT_CHECK_CONDITION;
1785  break;
1786 
1787  case SAS_PROTOCOL_SATA:
1788  case SAS_PROTOCOL_STP:
1789  case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1790  {
1791  task->ata_task.use_ncq = 0;
1792  stat = SAS_PROTO_RESPONSE;
1793  mvs_sata_done(mvi, task, slot_idx, err_dw0);
1794  }
1795  break;
1796  default:
1797  break;
1798  }
1799 
1800  return stat;
1801 }
1802 
1803 int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1804 {
1805  u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1806  struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1807  struct sas_task *task = slot->task;
1808  struct mvs_device *mvi_dev = NULL;
1809  struct task_status_struct *tstat;
1810  struct domain_device *dev;
1811  u32 aborted;
1812 
1813  void *to;
1814  enum exec_status sts;
1815 
1816  if (unlikely(!task || !task->lldd_task || !task->dev))
1817  return -1;
1818 
1819  tstat = &task->task_status;
1820  dev = task->dev;
1821  mvi_dev = dev->lldd_dev;
1822 
1823  spin_lock(&task->task_state_lock);
1824  task->task_state_flags &=
1825  ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1826  task->task_state_flags |= SAS_TASK_STATE_DONE;
1827  /* race condition */
1828  aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1829  spin_unlock(&task->task_state_lock);
1830 
1831  memset(tstat, 0, sizeof(*tstat));
1832  tstat->resp = SAS_TASK_COMPLETE;
1833 
1834  if (unlikely(aborted)) {
1835  tstat->stat = SAS_ABORTED_TASK;
1836  if (mvi_dev && mvi_dev->running_req)
1837  mvi_dev->running_req--;
1838  if (sas_protocol_ata(task->task_proto))
1839  mvs_free_reg_set(mvi, mvi_dev);
1840 
1841  mvs_slot_task_free(mvi, task, slot, slot_idx);
1842  return -1;
1843  }
1844 
1845  /* when no device is attached, complete via error handling */
1846  if (unlikely(!mvi_dev || flags)) {
1847  if (!mvi_dev)
1848  mv_dprintk("port has no device.\n");
1849  tstat->stat = SAS_PHY_DOWN;
1850  goto out;
1851  }
1852 
1853  /* error info record present */
1854  if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1855  mv_dprintk("port %d slot %d rx_desc %X has error info "
1856  "%016llX.\n", slot->port->sas_port.id, slot_idx,
1857  rx_desc, (u64)(*(u64 *)slot->response));
1858  tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1859  tstat->resp = SAS_TASK_COMPLETE;
1860  goto out;
1861  }
1862 
1863  switch (task->task_proto) {
1864  case SAS_PROTOCOL_SSP:
1865  /* hw says status == 0, datapres == 0 */
1866  if (rx_desc & RXQ_GOOD) {
1867  tstat->stat = SAM_STAT_GOOD;
1868  tstat->resp = SAS_TASK_COMPLETE;
1869  }
1870  /* response frame present */
1871  else if (rx_desc & RXQ_RSP) {
1872  struct ssp_response_iu *iu = slot->response +
1873  sizeof(struct mvs_err_info);
1874  sas_ssp_task_response(mvi->dev, task, iu);
1875  } else
1876  tstat->stat = SAM_STAT_CHECK_CONDITION;
1877  break;
1878 
1879  case SAS_PROTOCOL_SMP: {
1880  struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1881  tstat->stat = SAM_STAT_GOOD;
1882  to = kmap_atomic(sg_page(sg_resp));
1883  memcpy(to + sg_resp->offset,
1884  slot->response + sizeof(struct mvs_err_info),
1885  sg_dma_len(sg_resp));
1886  kunmap_atomic(to);
1887  break;
1888  }
1889 
1890  case SAS_PROTOCOL_SATA:
1891  case SAS_PROTOCOL_STP:
1892  case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1893  tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
1894  break;
1895  }
1896 
1897  default:
1898  tstat->stat = SAM_STAT_CHECK_CONDITION;
1899  break;
1900  }
1901  if (!slot->port->port_attached) {
1902  mv_dprintk("port %d has been removed.\n", slot->port->sas_port.id);
1903  tstat->stat = SAS_PHY_DOWN;
1904  }
1905 
1906 
1907 out:
1908  if (mvi_dev && mvi_dev->running_req) {
1909  mvi_dev->running_req--;
1910  if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
1911  mvs_free_reg_set(mvi, mvi_dev);
1912  }
1913  mvs_slot_task_free(mvi, task, slot, slot_idx);
1914  sts = tstat->stat;
1915 
1916  spin_unlock(&mvi->lock);
1917  if (task->task_done)
1918  task->task_done(task);
1919 
1920  spin_lock(&mvi->lock);
1921 
1922  return sts;
1923 }
1924 
1925 void mvs_do_release_task(struct mvs_info *mvi,
1926  int phy_no, struct domain_device *dev)
1927 {
1928  u32 slot_idx;
1929  struct mvs_phy *phy;
1930  struct mvs_port *port;
1931  struct mvs_slot_info *slot, *slot2;
1932 
1933  phy = &mvi->phy[phy_no];
1934  port = phy->port;
1935  if (!port)
1936  return;
1937  /* clean cmpl queue in case request is already finished */
1938  mvs_int_rx(mvi, false);
1939 
1940 
1941 
1942  list_for_each_entry_safe(slot, slot2, &port->list, entry) {
1943  struct sas_task *task;
1944  slot_idx = (u32) (slot - mvi->slot_info);
1945  task = slot->task;
1946 
1947  if (dev && task->dev != dev)
1948  continue;
1949 
1950  mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
1951  slot_idx, slot->slot_tag, task);
1952  MVS_CHIP_DISP->command_active(mvi, slot_idx);
1953 
1954  mvs_slot_complete(mvi, slot_idx, 1);
1955  }
1956 }
1957 
1958 void mvs_release_task(struct mvs_info *mvi,
1959  struct domain_device *dev)
1960 {
1961  int i, phyno[WIDE_PORT_MAX_PHY], num;
1962  num = mvs_find_dev_phyno(dev, phyno);
1963  for (i = 0; i < num; i++)
1964  mvs_do_release_task(mvi, phyno[i], dev);
1965 }
1966 
1967 static void mvs_phy_disconnected(struct mvs_phy *phy)
1968 {
1969  phy->phy_attached = 0;
1970  phy->att_dev_info = 0;
1971  phy->att_dev_sas_addr = 0;
1972 }
1973 
1974 static void mvs_work_queue(struct work_struct *work)
1975 {
1976  struct delayed_work *dw = container_of(work, struct delayed_work, work);
1977  struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
1978  struct mvs_info *mvi = mwq->mvi;
1979  unsigned long flags;
1980  u32 phy_no = (unsigned long) mwq->data;
1981  struct sas_ha_struct *sas_ha = mvi->sas;
1982  struct mvs_phy *phy = &mvi->phy[phy_no];
1983  struct asd_sas_phy *sas_phy = &phy->sas_phy;
1984 
1985  spin_lock_irqsave(&mvi->lock, flags);
1986  if (mwq->handler & PHY_PLUG_EVENT) {
1987 
1988  if (phy->phy_event & PHY_PLUG_OUT) {
1989  u32 tmp;
1990  struct sas_identify_frame *id;
1991  id = (struct sas_identify_frame *)phy->frame_rcvd;
1992  tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
1993  phy->phy_event &= ~PHY_PLUG_OUT;
1994  if (!(tmp & PHY_READY_MASK)) {
1995  sas_phy_disconnected(sas_phy);
1996  mvs_phy_disconnected(phy);
1997  sas_ha->notify_phy_event(sas_phy,
1998  PHYE_LOSS_OF_SIGNAL);
1999  mv_dprintk("phy%d Removed Device\n", phy_no);
2000  } else {
2001  MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2002  mvs_update_phyinfo(mvi, phy_no, 1);
2003  mvs_bytes_dmaed(mvi, phy_no);
2004  mvs_port_notify_formed(sas_phy, 0);
2005  mv_dprintk("phy%d Attached Device\n", phy_no);
2006  }
2007  }
2008  } else if (mwq->handler & EXP_BRCT_CHG) {
2009  phy->phy_event &= ~EXP_BRCT_CHG;
2010  sas_ha->notify_port_event(sas_phy,
2011  PORTE_BROADCAST_RCVD);
2012  mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
2013  }
2014  list_del(&mwq->entry);
2015  spin_unlock_irqrestore(&mvi->lock, flags);
2016  kfree(mwq);
2017 }
2018 
2019 static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
2020 {
2021  struct mvs_wq *mwq;
2022  int ret = 0;
2023 
2024  mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
2025  if (mwq) {
2026  mwq->mvi = mvi;
2027  mwq->data = data;
2028  mwq->handler = handler;
2029  MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
2030  list_add_tail(&mwq->entry, &mvi->wq_list);
2031  schedule_delayed_work(&mwq->work_q, HZ * 2);
2032  } else
2033  ret = -ENOMEM;
2034 
2035  return ret;
2036 }
2037 
2038 static void mvs_sig_time_out(unsigned long tphy)
2039 {
2040  struct mvs_phy *phy = (struct mvs_phy *)tphy;
2041  struct mvs_info *mvi = phy->mvi;
2042  u8 phy_no;
2043 
2044  for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
2045  if (&mvi->phy[phy_no] == phy) {
2046  mv_dprintk("Get signature time out, reset phy %d\n",
2047  phy_no+mvi->id*mvi->chip->n_phy);
2048  MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
2049  }
2050  }
2051 }
2052 
2053 void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2054 {
2055  u32 tmp;
2056  struct mvs_phy *phy = &mvi->phy[phy_no];
2057 
2058  phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2059  MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2060  mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2061  MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2062  mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
2063  phy->irq_status);
2064 
2065  /*
2066  * events is a port event now;
2067  * we need to check the per-port interrupt status.
2068  */
2069 
2070  if (phy->irq_status & PHYEV_DCDR_ERR) {
2071  mv_dprintk("phy %d STP decoding error.\n",
2072  phy_no + mvi->id*mvi->chip->n_phy);
2073  }
2074 
2075  if (phy->irq_status & PHYEV_POOF) {
2076  mdelay(500);
2077  if (!(phy->phy_event & PHY_PLUG_OUT)) {
2078  int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2079  int ready;
2080  mvs_do_release_task(mvi, phy_no, NULL);
2081  phy->phy_event |= PHY_PLUG_OUT;
2082  MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
2083  mvs_handle_event(mvi,
2084  (void *)(unsigned long)phy_no,
2085  PHY_PLUG_EVENT);
2086  ready = mvs_is_phy_ready(mvi, phy_no);
2087  if (ready || dev_sata) {
2088  if (MVS_CHIP_DISP->stp_reset)
2089  MVS_CHIP_DISP->stp_reset(mvi,
2090  phy_no);
2091  else
2092  MVS_CHIP_DISP->phy_reset(mvi,
2093  phy_no, MVS_SOFT_RESET);
2094  return;
2095  }
2096  }
2097  }
2098 
2099  if (phy->irq_status & PHYEV_COMWAKE) {
2100  tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
2101  MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
2102  tmp | PHYEV_SIG_FIS);
2103  if (phy->timer.function == NULL) {
2104  phy->timer.data = (unsigned long)phy;
2105  phy->timer.function = mvs_sig_time_out;
2106  phy->timer.expires = jiffies + 5*HZ;
2107  add_timer(&phy->timer);
2108  }
2109  }
2110  if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2111  phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2112  mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2113  if (phy->phy_status) {
2114  mdelay(10);
2115  MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2116  if (phy->phy_type & PORT_TYPE_SATA) {
2117  tmp = MVS_CHIP_DISP->read_port_irq_mask(
2118  mvi, phy_no);
2119  tmp &= ~PHYEV_SIG_FIS;
2120  MVS_CHIP_DISP->write_port_irq_mask(mvi,
2121  phy_no, tmp);
2122  }
2123  mvs_update_phyinfo(mvi, phy_no, 0);
2124  if (phy->phy_type & PORT_TYPE_SAS) {
2125  MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
2126  mdelay(10);
2127  }
2128 
2129  mvs_bytes_dmaed(mvi, phy_no);
2130  /* whether driver is going to handle hot plug */
2131  if (phy->phy_event & PHY_PLUG_OUT) {
2132  mvs_port_notify_formed(&phy->sas_phy, 0);
2133  phy->phy_event &= ~PHY_PLUG_OUT;
2134  }
2135  } else {
2136  mv_dprintk("plugin interrupt but phy%d is gone\n",
2137  phy_no + mvi->id*mvi->chip->n_phy);
2138  }
2139  } else if (phy->irq_status & PHYEV_BROAD_CH) {
2140  mv_dprintk("phy %d broadcast change.\n",
2141  phy_no + mvi->id*mvi->chip->n_phy);
2142  mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
2143  EXP_BRCT_CHG);
2144  }
2145 }
2146 
2147 int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
2148 {
2149  u32 rx_prod_idx, rx_desc;
2150  bool attn = false;
2151 
2152  /* the first dword in the RX ring is special: it contains
2153  * a mirror of the hardware's RX producer index, so that
2154  * we don't have to stall the CPU reading that register.
2155  * The actual RX ring is offset by one dword, due to this.
2156  */
2157  rx_prod_idx = mvi->rx_cons;
2158  mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
2159  if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
2160  return 0;
2161 
2162  /* The CMPL_Q may come late; read from the register and try again.
2163  * Note: if coalescing is enabled, the register must be
2164  * read every time.
2165  */
2166  if (unlikely(mvi->rx_cons == rx_prod_idx))
2167  mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
2168 
2169  if (mvi->rx_cons == rx_prod_idx)
2170  return 0;
2171 
2172  while (mvi->rx_cons != rx_prod_idx) {
2173  /* increment our internal RX consumer pointer */
2174  rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
2175  rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
2176 
2177  if (likely(rx_desc & RXQ_DONE))
2178  mvs_slot_complete(mvi, rx_desc, 0);
2179  if (rx_desc & RXQ_ATTN) {
2180  attn = true;
2181  } else if (rx_desc & RXQ_ERR) {
2182  if (!(rx_desc & RXQ_DONE))
2183  mvs_slot_complete(mvi, rx_desc, 0);
2184  } else if (rx_desc & RXQ_SLOT_RESET) {
2185  mvs_slot_free(mvi, rx_desc);
2186  }
2187  }
2188 
2189  if (attn && self_clear)
2190  MVS_CHIP_DISP->int_full(mvi);
2191  return 0;
2192 }
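
Annotation: per the comment at the top of mvs_int_rx(), rx[0] mirrors the hardware producer index and the entry for ring slot i lives at rx[i+1]. A user-space sketch of the consume loop (ring size and descriptor values are made up):

#include <stdio.h>
#include <stdint.h>

#define DEMO_RX_RING_SZ 8       /* stand-in ring size, power of two */

int main(void)
{
	/* rx[0] = mirrored producer index; slot i's entry is rx[i+1] */
	uint32_t rx[DEMO_RX_RING_SZ + 1] = { 3, 0, 0x10, 0x11, 0x12 };
	uint32_t cons = 0;          /* previously consumed index */
	uint32_t prod = rx[0];      /* snapshot of producer index */

	while (cons != prod) {
		cons = (cons + 1) & (DEMO_RX_RING_SZ - 1);
		printf("complete rx_desc 0x%x\n", rx[cons + 1]);
	}
	return 0;
}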
2193