libata-eh.c
1 /*
2  * libata-eh.c - libata error handling
3  *
4  * Maintained by: Jeff Garzik <[email protected]>
5  * Please ALWAYS copy [email protected]
6  * on emails.
7  *
8  * Copyright 2006 Tejun Heo <[email protected]>
9  *
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License as
13  * published by the Free Software Foundation; either version 2, or
14  * (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; see the file COPYING. If not, write to
23  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24  * USA.
25  *
26  *
27  * libata documentation is available via 'make {ps|pdf}docs',
28  * as Documentation/DocBook/libata.*
29  *
30  * Hardware documentation available from http://www.t13.org/ and
31  * http://www.sata-io.org/
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/blkdev.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi_eh.h>
42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_dbg.h>
45 #include "../scsi/scsi_transport_api.h"
46 
47 #include <linux/libata.h>
48 
49 #include "libata.h"
50 
51 enum {
52  /* speed down verdicts */
53  ATA_EH_SPDN_NCQ_OFF = (1 << 0),
54  ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
55  ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
56  ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),
57 
58  /* error flags */
59  ATA_EFLAG_IS_IO = (1 << 0),
60  ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
61  ATA_EFLAG_OLD_ER = (1 << 31),
62 
63  /* error categories */
64  ATA_ECAT_NONE = 0,
65  ATA_ECAT_ATA_BUS = 1,
66  ATA_ECAT_TOUT_HSM = 2,
67  ATA_ECAT_UNK_DEV = 3,
68  ATA_ECAT_DUBIOUS_NONE = 4,
69  ATA_ECAT_DUBIOUS_ATA_BUS = 5,
70  ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
71  ATA_ECAT_DUBIOUS_UNK_DEV = 7,
72  ATA_ECAT_NR = 8,
73 
74  ATA_EH_CMD_DFL_TIMEOUT = 5000,
75 
76  /* always put at least this amount of time between resets */
77  ATA_EH_RESET_COOL_DOWN = 5000,
78 
79  /* Waiting in ->prereset can never be reliable. It's
80  * sometimes nice to wait there but it can't be depended upon;
81  * otherwise, we wouldn't be resetting. Just give it enough
82  * time for most drives to spin up.
83  */
84  ATA_EH_PRERESET_TIMEOUT = 10000,
85  ATA_EH_FASTDRAIN_INTERVAL = 3000,
86 
87  ATA_EH_UA_TRIES = 5,
88 
89  /* probe speed down parameters, see ata_eh_schedule_probe() */
90  ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */
91  ATA_EH_PROBE_TRIALS = 2,
92 };
93 
94 /* The following table determines how we sequence resets. Each entry
95  * represents timeout for that try. The first try can be soft or
96  * hardreset. All others are hardreset if available. In most cases
97  * the first reset w/ 10sec timeout should succeed. Following entries
98  * are mostly for error handling, hotplug and retarded devices.
99  */
100 static const unsigned long ata_eh_reset_timeouts[] = {
101  10000, /* most drives spin up by 10sec */
102  10000, /* > 99% working drives spin up before 20sec */
103  35000, /* give > 30 secs of idleness for retarded devices */
104  5000, /* and sweet one last chance */
105  ULONG_MAX, /* > 1 min has elapsed, give up */
106 };
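/*
 * Editor's note: a minimal sketch (not part of the original file) of how
 * ata_eh_reset() below consumes this table -- it counts entries up to the
 * ULONG_MAX sentinel to get the number of tries, then takes the current
 * try's timeout:
 *
 *	int max_tries = 0, try = 0;
 *
 *	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
 *		max_tries++;
 *	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try]);
 */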
107 
108 static const unsigned long ata_eh_identify_timeouts[] = {
109  5000, /* covers > 99% of successes and not too boring on failures */
110  10000, /* combined time till here is enough even for media access */
111  30000, /* for true idiots */
112  ULONG_MAX,
113 };
114 
115 static const unsigned long ata_eh_flush_timeouts[] = {
116  15000, /* be generous with flush */
117  15000, /* ditto */
118  30000, /* and even more generous */
119  ULONG_MAX,
120 };
121 
122 static const unsigned long ata_eh_other_timeouts[] = {
123  5000, /* same rationale as identify timeout */
124  10000, /* ditto */
125  /* but no merciful 30sec for other commands, it just isn't worth it */
126  ULONG_MAX,
127 };
128 
129 struct ata_eh_cmd_timeout_ent {
130  const u8 *commands;
131  const unsigned long *timeouts;
132 };
133 
134 /* The following table determines timeouts to use for EH internal
135  * commands. Each table entry is a command class and matches the
136  * commands the entry applies to and the timeout table to use.
137  *
138  * On the retry after a command timed out, the next timeout value from
139  * the table is used. If the table doesn't contain further entries,
140  * the last value is used.
141  *
142  * ehc->cmd_timeout_idx keeps track of which timeout to use per
143  * command class, so if SET_FEATURES times out on the first try, the
144  * next try will use the second timeout value only for that class.
145  */
146 #define CMDS(cmds...) (const u8 []){ cmds, 0 }
147 static const struct ata_eh_cmd_timeout_ent
148 ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
149  { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
150  .timeouts = ata_eh_identify_timeouts, },
151  { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
152  .timeouts = ata_eh_other_timeouts, },
153  { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
154  .timeouts = ata_eh_other_timeouts, },
155  { .commands = CMDS(ATA_CMD_SET_FEATURES),
156  .timeouts = ata_eh_other_timeouts, },
157  { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
158  .timeouts = ata_eh_other_timeouts, },
159  { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
160  .timeouts = ata_eh_flush_timeouts },
161 };
162 #undef CMDS
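/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * the per-class escalation described above, using the helpers defined
 * further down in this file.  The first IDENTIFY gets 5s; after a timeout
 * the retry gets the next value for that command class:
 *
 *	unsigned long t = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);
 *	// t == 5000; the command times out, so bump the class index:
 *	ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
 *	t = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);
 *	// t == 10000 for this device from now on
 */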
163 
164 static void __ata_port_freeze(struct ata_port *ap);
165 #ifdef CONFIG_PM
166 static void ata_eh_handle_port_suspend(struct ata_port *ap);
167 static void ata_eh_handle_port_resume(struct ata_port *ap);
168 #else /* CONFIG_PM */
169 static void ata_eh_handle_port_suspend(struct ata_port *ap)
170 { }
171 
172 static void ata_eh_handle_port_resume(struct ata_port *ap)
173 { }
174 #endif /* CONFIG_PM */
175 
176 static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
177  va_list args)
178 {
179  ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
180  ATA_EH_DESC_LEN - ehi->desc_len,
181  fmt, args);
182 }
183 
194 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
195 {
196  va_list args;
197 
198  va_start(args, fmt);
199  __ata_ehi_pushv_desc(ehi, fmt, args);
200  va_end(args);
201 }
202 
214 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
215 {
216  va_list args;
217 
218  if (ehi->desc_len)
219  __ata_ehi_push_desc(ehi, ", ");
220 
221  va_start(args, fmt);
222  __ata_ehi_pushv_desc(ehi, fmt, args);
223  va_end(args);
224 }
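/*
 * Editor's note: a usage sketch (not part of the original file; irq_stat
 * is a hypothetical LLD status word).  Drivers build the EH description
 * incrementally; ata_ehi_push_desc() inserts the ", " separator
 * automatically:
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ata_ehi_push_desc(ehi, "interface fatal error");
 *	// desc now reads "irq_stat 0x........, interface fatal error"
 */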
225 
235 void ata_ehi_clear_desc(struct ata_eh_info *ehi)
236 {
237  ehi->desc[0] = '\0';
238  ehi->desc_len = 0;
239 }
240 
254 void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
255 {
256  va_list args;
257 
258  WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
259 
260  if (ap->link.eh_info.desc_len)
261  __ata_ehi_push_desc(&ap->link.eh_info, " ");
262 
263  va_start(args, fmt);
264  __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
265  va_end(args);
266 }
267 
268 #ifdef CONFIG_PCI
269 
285 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
286  const char *name)
287 {
288  struct pci_dev *pdev = to_pci_dev(ap->host->dev);
289  char *type = "";
290  unsigned long long start, len;
291 
292  if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
293  type = "m";
294  else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
295  type = "i";
296 
297  start = (unsigned long long)pci_resource_start(pdev, bar);
298  len = (unsigned long long)pci_resource_len(pdev, bar);
299 
300  if (offset < 0)
301  ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
302  else
303  ata_port_desc(ap, "%s 0x%llx", name,
304  start + (unsigned long long)offset);
305 }
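/*
 * Editor's note: a usage sketch (not part of the original file), modeled
 * on ahci, which describes its MMIO BAR and per-port register window:
 *
 *	ata_port_pbar_desc(ap, 5, -1, "abar");	// AHCI's ABAR is BAR 5
 *	ata_port_pbar_desc(ap, 5, 0x100 + ap->port_no * 0x80, "port");
 *
 * which shows up in dmesg as e.g. "abar m8192@0xf7d06000".
 */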
306 
307 #endif /* CONFIG_PCI */
308 
309 static int ata_lookup_timeout_table(u8 cmd)
310 {
311  int i;
312 
313  for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
314  const u8 *cur;
315 
316  for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
317  if (*cur == cmd)
318  return i;
319  }
320 
321  return -1;
322 }
323 
337 unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
338 {
339  struct ata_eh_context *ehc = &dev->link->eh_context;
340  int ent = ata_lookup_timeout_table(cmd);
341  int idx;
342 
343  if (ent < 0)
344  return ATA_EH_CMD_DFL_TIMEOUT;
345 
346  idx = ehc->cmd_timeout_idx[dev->devno][ent];
347  return ata_eh_cmd_timeout_table[ent].timeouts[idx];
348 }
349 
362 static void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
363 {
364  struct ata_eh_context *ehc = &dev->link->eh_context;
365  int ent = ata_lookup_timeout_table(cmd);
366  int idx;
367 
368  if (ent < 0)
369  return;
370 
371  idx = ehc->cmd_timeout_idx[dev->devno][ent];
372  if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
373  ehc->cmd_timeout_idx[dev->devno][ent]++;
374 }
375 
376 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
377  unsigned int err_mask)
378 {
379  struct ata_ering_entry *ent;
380 
381  WARN_ON(!err_mask);
382 
383  ering->cursor++;
384  ering->cursor %= ATA_ERING_SIZE;
385 
386  ent = &ering->ring[ering->cursor];
387  ent->eflags = eflags;
388  ent->err_mask = err_mask;
389  ent->timestamp = get_jiffies_64();
390 }
391 
392 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
393 {
394  struct ata_ering_entry *ent = &ering->ring[ering->cursor];
395 
396  if (ent->err_mask)
397  return ent;
398  return NULL;
399 }
400 
401 int ata_ering_map(struct ata_ering *ering,
402  int (*map_fn)(struct ata_ering_entry *, void *),
403  void *arg)
404 {
405  int idx, rc = 0;
406  struct ata_ering_entry *ent;
407 
408  idx = ering->cursor;
409  do {
410  ent = &ering->ring[idx];
411  if (!ent->err_mask)
412  break;
413  rc = map_fn(ent, arg);
414  if (rc)
415  break;
416  idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
417  } while (idx != ering->cursor);
418 
419  return rc;
420 }
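/*
 * Editor's note: an illustrative callback (not part of the original file).
 * ata_ering_map() visits entries newest-first and stops early if the
 * callback returns non-zero, as speed_down_verdict_cb() below does for
 * stale entries:
 *
 *	static int count_timeouts(struct ata_ering_entry *ent, void *arg)
 *	{
 *		if (ent->err_mask & AC_ERR_TIMEOUT)
 *			(*(int *)arg)++;
 *		return 0;	// 0 == keep walking
 *	}
 *
 *	int nr = 0;
 *	ata_ering_map(&dev->ering, count_timeouts, &nr);
 */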
421 
422 static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
423 {
424  ent->eflags |= ATA_EFLAG_OLD_ER;
425  return 0;
426 }
427 
428 static void ata_ering_clear(struct ata_ering *ering)
429 {
430  ata_ering_map(ering, ata_ering_clear_cb, NULL);
431 }
432 
433 static unsigned int ata_eh_dev_action(struct ata_device *dev)
434 {
435  struct ata_eh_context *ehc = &dev->link->eh_context;
436 
437  return ehc->i.action | ehc->i.dev_action[dev->devno];
438 }
439 
440 static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
441  struct ata_eh_info *ehi, unsigned int action)
442 {
443  struct ata_device *tdev;
444 
445  if (!dev) {
446  ehi->action &= ~action;
447  ata_for_each_dev(tdev, link, ALL)
448  ehi->dev_action[tdev->devno] &= ~action;
449  } else {
450  /* doesn't make sense for port-wide EH actions */
451  WARN_ON(!(action & ATA_EH_PERDEV_MASK));
452 
453  /* break ehi->action into ehi->dev_action */
454  if (ehi->action & action) {
455  ata_for_each_dev(tdev, link, ALL)
456  ehi->dev_action[tdev->devno] |=
457  ehi->action & action;
458  ehi->action &= ~action;
459  }
460 
461  /* turn off the specified per-dev action */
462  ehi->dev_action[dev->devno] &= ~action;
463  }
464 }
465 
477 void ata_eh_acquire(struct ata_port *ap)
478 {
479  mutex_lock(&ap->host->eh_mutex);
480  WARN_ON_ONCE(ap->host->eh_owner);
481  ap->host->eh_owner = current;
482 }
483 
494 void ata_eh_release(struct ata_port *ap)
495 {
496  WARN_ON_ONCE(ap->host->eh_owner != current);
497  ap->host->eh_owner = NULL;
498  mutex_unlock(&ap->host->eh_mutex);
499 }
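/*
 * Editor's note: a usage sketch (not part of the original file).  Long
 * sleeps inside EH drop the host-wide EH ownership so other ports' EH can
 * make progress; ata_msleep() in libata-core.c follows this pattern:
 *
 *	bool owns_eh = ap && ap->host->eh_owner == current;
 *
 *	if (owns_eh)
 *		ata_eh_release(ap);
 *	msleep(msecs);
 *	if (owns_eh)
 *		ata_eh_acquire(ap);
 */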
500 
520 enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
521 {
522  struct Scsi_Host *host = cmd->device->host;
523  struct ata_port *ap = ata_shost_to_port(host);
524  unsigned long flags;
525  struct ata_queued_cmd *qc;
526  enum blk_eh_timer_return ret;
527 
528  DPRINTK("ENTER\n");
529 
530  if (ap->ops->error_handler) {
531  ret = BLK_EH_NOT_HANDLED;
532  goto out;
533  }
534 
535  ret = BLK_EH_HANDLED;
536  spin_lock_irqsave(ap->lock, flags);
537  qc = ata_qc_from_tag(ap, ap->link.active_tag);
538  if (qc) {
539  WARN_ON(qc->scsicmd != cmd);
540  qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
541  qc->err_mask |= AC_ERR_TIMEOUT;
542  ret = BLK_EH_NOT_HANDLED;
543  }
544  spin_unlock_irqrestore(ap->lock, flags);
545 
546  out:
547  DPRINTK("EXIT, ret=%d\n", ret);
548  return ret;
549 }
550 
551 static void ata_eh_unload(struct ata_port *ap)
552 {
553  struct ata_link *link;
554  struct ata_device *dev;
555  unsigned long flags;
556 
557  /* Restore SControl IPM and SPD for the next driver and
558  * disable attached devices.
559  */
560  ata_for_each_link(link, ap, PMP_FIRST) {
561  sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
562  ata_for_each_dev(dev, link, ALL)
563  ata_dev_disable(dev);
564  }
565 
566  /* freeze and set UNLOADED */
567  spin_lock_irqsave(ap->lock, flags);
568 
569  ata_port_freeze(ap); /* won't be thawed */
570  ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */
571  ap->pflags |= ATA_PFLAG_UNLOADED;
572 
573  spin_unlock_irqrestore(ap->lock, flags);
574 }
575 
588 void ata_scsi_error(struct Scsi_Host *host)
589 {
590  struct ata_port *ap = ata_shost_to_port(host);
591  unsigned long flags;
592  LIST_HEAD(eh_work_q);
593 
594  DPRINTK("ENTER\n");
595 
596  spin_lock_irqsave(host->host_lock, flags);
597  list_splice_init(&host->eh_cmd_q, &eh_work_q);
598  spin_unlock_irqrestore(host->host_lock, flags);
599 
600  ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
601 
602  /* If we timed out racing normal completion and there is nothing
603  * to recover (nr_timedout == 0), why are we doing error recovery? */
604  ata_scsi_port_error_handler(host, ap);
605 
606  /* finish or retry handled scmd's and clean up */
607  WARN_ON(host->host_failed || !list_empty(&eh_work_q));
608 
609  DPRINTK("EXIT\n");
610 }
611 
622 void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
623  struct list_head *eh_work_q)
624 {
625  int i;
626  unsigned long flags;
627 
628  /* make sure sff pio task is not running */
629  ata_sff_flush_pio_task(ap);
630 
631  /* synchronize with host lock and sort out timeouts */
632 
633  /* For new EH, all qcs are finished in one of three ways -
634  * normal completion, error completion, and SCSI timeout.
635  * Both completions can race against SCSI timeout. When normal
636  * completion wins, the qc never reaches EH. When error
637  * completion wins, the qc has ATA_QCFLAG_FAILED set.
638  *
639  * When SCSI timeout wins, things are a bit more complex.
640  * Normal or error completion can occur after the timeout but
641  * before this point. In such cases, both types of
642  * completions are honored. A scmd is determined to have
643  * timed out iff its associated qc is active and not failed.
644  */
645  if (ap->ops->error_handler) {
646  struct scsi_cmnd *scmd, *tmp;
647  int nr_timedout = 0;
648 
649  spin_lock_irqsave(ap->lock, flags);
650 
651  /* This must occur under the ap->lock as we don't want
652  a polled recovery to race the real interrupt handler
653 
654  The lost_interrupt handler checks for any completed but
655  non-notified command and completes much like an IRQ handler.
656 
657  We then fall into the error recovery code which will treat
658  this as if normal completion won the race */
659 
660  if (ap->ops->lost_interrupt)
661  ap->ops->lost_interrupt(ap);
662 
663  list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
664  struct ata_queued_cmd *qc;
665 
666  for (i = 0; i < ATA_MAX_QUEUE; i++) {
667  qc = __ata_qc_from_tag(ap, i);
668  if (qc->flags & ATA_QCFLAG_ACTIVE &&
669  qc->scsicmd == scmd)
670  break;
671  }
672 
673  if (i < ATA_MAX_QUEUE) {
674  /* the scmd has an associated qc */
675  if (!(qc->flags & ATA_QCFLAG_FAILED)) {
676  /* which hasn't failed yet, timeout */
677  qc->err_mask |= AC_ERR_TIMEOUT;
678  qc->flags |= ATA_QCFLAG_FAILED;
679  nr_timedout++;
680  }
681  } else {
682  /* Normal completion occurred after
683  * SCSI timeout but before this point.
684  * Successfully complete it.
685  */
686  scmd->retries = scmd->allowed;
687  scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
688  }
689  }
690 
691  /* If we have timed out qcs. They belong to EH from
692  * this point but the state of the controller is
693  * unknown. Freeze the port to make sure the IRQ
694  * handler doesn't diddle with those qcs. This must
695  * be done atomically w.r.t. setting QCFLAG_FAILED.
696  */
697  if (nr_timedout)
698  __ata_port_freeze(ap);
699 
700  spin_unlock_irqrestore(ap->lock, flags);
701 
702  /* initialize eh_tries */
703  ap->eh_tries = ATA_EH_MAX_TRIES;
704  } else
705  spin_unlock_wait(ap->lock);
706 
707 }
709 
718 void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
719 {
720  unsigned long flags;
721 
722  /* invoke error handler */
723  if (ap->ops->error_handler) {
724  struct ata_link *link;
725 
726  /* acquire EH ownership */
727  ata_eh_acquire(ap);
728  repeat:
729  /* kill fast drain timer */
730  del_timer_sync(&ap->fastdrain_timer);
731 
732  /* process port resume request */
733  ata_eh_handle_port_resume(ap);
734 
735  /* fetch & clear EH info */
736  spin_lock_irqsave(ap->lock, flags);
737 
738  ata_for_each_link(link, ap, HOST_FIRST) {
739  struct ata_eh_context *ehc = &link->eh_context;
740  struct ata_device *dev;
741 
742  memset(&link->eh_context, 0, sizeof(link->eh_context));
743  link->eh_context.i = link->eh_info;
744  memset(&link->eh_info, 0, sizeof(link->eh_info));
745 
746  ata_for_each_dev(dev, link, ENABLED) {
747  int devno = dev->devno;
748 
749  ehc->saved_xfer_mode[devno] = dev->xfer_mode;
750  if (ata_ncq_enabled(dev))
751  ehc->saved_ncq_enabled |= 1 << devno;
752  }
753  }
754 
755  ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
756  ap->pflags &= ~ATA_PFLAG_EH_PENDING;
757  ap->excl_link = NULL; /* don't maintain exclusion over EH */
758 
759  spin_unlock_irqrestore(ap->lock, flags);
760 
761  /* invoke EH, skip if unloading or suspended */
762  if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
763  ap->ops->error_handler(ap);
764  else {
765  /* if unloading, commence suicide */
766  if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
767  !(ap->pflags & ATA_PFLAG_UNLOADED))
768  ata_eh_unload(ap);
769  ata_eh_finish(ap);
770  }
771 
772  /* process port suspend request */
773  ata_eh_handle_port_suspend(ap);
774 
775  /* Exception might have happened after ->error_handler
776  * recovered the port but before this point. Repeat
777  * EH in such case.
778  */
779  spin_lock_irqsave(ap->lock, flags);
780 
781  if (ap->pflags & ATA_PFLAG_EH_PENDING) {
782  if (--ap->eh_tries) {
783  spin_unlock_irqrestore(ap->lock, flags);
784  goto repeat;
785  }
786  ata_port_err(ap,
787  "EH pending after %d tries, giving up\n",
788  ATA_EH_MAX_TRIES);
789  ap->pflags &= ~ATA_PFLAG_EH_PENDING;
790  }
791 
792  /* this run is complete, make sure EH info is clear */
793  ata_for_each_link(link, ap, HOST_FIRST)
794  memset(&link->eh_info, 0, sizeof(link->eh_info));
795 
796  /* end eh (clear host_eh_scheduled) while holding
797  * ap->lock such that if exception occurs after this
798  * point but before EH completion, SCSI midlayer will
799  * re-initiate EH.
800  */
801  ap->ops->end_eh(ap);
802 
803  spin_unlock_irqrestore(ap->lock, flags);
804  ata_eh_release(ap);
805  } else {
806  WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
807  ap->ops->eng_timeout(ap);
808  }
809 
810  scsi_eh_flush_done_q(&ap->eh_done_q);
811 
812  /* clean up */
813  spin_lock_irqsave(ap->lock, flags);
814 
815  if (ap->pflags & ATA_PFLAG_LOADING)
816  ap->pflags &= ~ATA_PFLAG_LOADING;
817  else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
818  schedule_delayed_work(&ap->hotplug_task, 0);
819 
820  if (ap->pflags & ATA_PFLAG_RECOVERED)
821  ata_port_info(ap, "EH complete\n");
822 
823  ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
824 
825  /* tell wait_eh that we're done */
826  ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
827  wake_up_all(&ap->eh_wait_q);
828 
829  spin_unlock_irqrestore(ap->lock, flags);
830 }
832 
842 void ata_port_wait_eh(struct ata_port *ap)
843 {
844  unsigned long flags;
845  DEFINE_WAIT(wait);
846 
847  retry:
848  spin_lock_irqsave(ap->lock, flags);
849 
850  while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
851  prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
852  spin_unlock_irqrestore(ap->lock, flags);
853  schedule();
854  spin_lock_irqsave(ap->lock, flags);
855  }
856  finish_wait(&ap->eh_wait_q, &wait);
857 
858  spin_unlock_irqrestore(ap->lock, flags);
859 
860  /* make sure SCSI EH is complete */
861  if (scsi_host_in_recovery(ap->scsi_host)) {
862  ata_msleep(ap, 10);
863  goto retry;
864  }
865 }
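/*
 * Editor's note: a usage sketch (not part of the original file).  Callers
 * that need a synchronous EH round schedule it under ap->lock and then
 * wait, roughly:
 *
 *	spin_lock_irqsave(ap->lock, flags);
 *	ap->link.eh_info.action |= ATA_EH_RESET;
 *	ata_port_schedule_eh(ap);
 *	spin_unlock_irqrestore(ap->lock, flags);
 *
 *	ata_port_wait_eh(ap);
 */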
867 
868 static int ata_eh_nr_in_flight(struct ata_port *ap)
869 {
870  unsigned int tag;
871  int nr = 0;
872 
873  /* count only non-internal commands */
874  for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
875  if (ata_qc_from_tag(ap, tag))
876  nr++;
877 
878  return nr;
879 }
880 
881 void ata_eh_fastdrain_timerfn(unsigned long arg)
882 {
883  struct ata_port *ap = (void *)arg;
884  unsigned long flags;
885  int cnt;
886 
887  spin_lock_irqsave(ap->lock, flags);
888 
889  cnt = ata_eh_nr_in_flight(ap);
890 
891  /* are we done? */
892  if (!cnt)
893  goto out_unlock;
894 
895  if (cnt == ap->fastdrain_cnt) {
896  unsigned int tag;
897 
898  /* No progress during the last interval, tag all
899  * in-flight qcs as timed out and freeze the port.
900  */
901  for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
902  struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
903  if (qc)
904  qc->err_mask |= AC_ERR_TIMEOUT;
905  }
906 
907  ata_port_freeze(ap);
908  } else {
909  /* some qcs have finished, give it another chance */
910  ap->fastdrain_cnt = cnt;
911  ap->fastdrain_timer.expires =
912  ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
913  add_timer(&ap->fastdrain_timer);
914  }
915 
916  out_unlock:
917  spin_unlock_irqrestore(ap->lock, flags);
918 }
919 
932 static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
933 {
934  int cnt;
935 
936  /* already scheduled? */
937  if (ap->pflags & ATA_PFLAG_EH_PENDING)
938  return;
939 
940  ap->pflags |= ATA_PFLAG_EH_PENDING;
941 
942  if (!fastdrain)
943  return;
944 
945  /* do we have in-flight qcs? */
946  cnt = ata_eh_nr_in_flight(ap);
947  if (!cnt)
948  return;
949 
950  /* activate fast drain */
951  ap->fastdrain_cnt = cnt;
952  ap->fastdrain_timer.expires =
953  ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
954  add_timer(&ap->fastdrain_timer);
955 }
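/*
 * Editor's note (not part of the original file): with
 * ATA_EH_FASTDRAIN_INTERVAL == 3000, scheduling EH with fastdrain while
 * qcs are in flight arms a 3s timer.  If some qcs complete within the
 * interval the timer re-arms; only a full interval with no progress makes
 * ata_eh_fastdrain_timerfn() above mark the stragglers AC_ERR_TIMEOUT and
 * freeze the port.
 */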
956 
967 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
968 {
969  struct ata_port *ap = qc->ap;
970  struct request_queue *q = qc->scsicmd->device->request_queue;
971  unsigned long flags;
972 
973  WARN_ON(!ap->ops->error_handler);
974 
975  qc->flags |= ATA_QCFLAG_FAILED;
976  ata_eh_set_pending(ap, 1);
977 
978  /* The following will fail if timeout has already expired.
979  * ata_scsi_error() takes care of such scmds on EH entry.
980  * Note that ATA_QCFLAG_FAILED is unconditionally set after
981  * this function completes.
982  */
983  spin_lock_irqsave(q->queue_lock, flags);
984  blk_abort_request(qc->scsicmd->request);
985  spin_unlock_irqrestore(q->queue_lock, flags);
986 }
987 
995 void ata_std_sched_eh(struct ata_port *ap)
996 {
997  WARN_ON(!ap->ops->error_handler);
998 
999  if (ap->pflags & ATA_PFLAG_INITIALIZING)
1000  return;
1001 
1002  ata_eh_set_pending(ap, 1);
1003  scsi_schedule_eh(ap->scsi_host);
1004 
1005  DPRINTK("port EH scheduled\n");
1006 }
1007 EXPORT_SYMBOL_GPL(ata_std_sched_eh);
1008 
1021 void ata_std_end_eh(struct ata_port *ap)
1022 {
1023  struct Scsi_Host *host = ap->scsi_host;
1024 
1025  host->host_eh_scheduled = 0;
1026 }
1027 EXPORT_SYMBOL(ata_std_end_eh);
1028 
1029 
1040 void ata_port_schedule_eh(struct ata_port *ap)
1041 {
1042  /* see: ata_std_sched_eh, unless you know better */
1043  ap->ops->sched_eh(ap);
1044 }
1045 
1046 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1047 {
1048  int tag, nr_aborted = 0;
1049 
1050  WARN_ON(!ap->ops->error_handler);
1051 
1052  /* we're gonna abort all commands, no need for fast drain */
1053  ata_eh_set_pending(ap, 0);
1054 
1055  for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1056  struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1057 
1058  if (qc && (!link || qc->dev->link == link)) {
1059  qc->flags |= ATA_QCFLAG_FAILED;
1060  ata_qc_complete(qc);
1061  nr_aborted++;
1062  }
1063  }
1064 
1065  if (!nr_aborted)
1066  ata_port_schedule_eh(ap);
1067 
1068  return nr_aborted;
1069 }
1070 
1083 int ata_link_abort(struct ata_link *link)
1084 {
1085  return ata_do_link_abort(link->ap, link);
1086 }
1087 
1100 int ata_port_abort(struct ata_port *ap)
1101 {
1102  return ata_do_link_abort(ap, NULL);
1103 }
1104 
1123 static void __ata_port_freeze(struct ata_port *ap)
1124 {
1125  WARN_ON(!ap->ops->error_handler);
1126 
1127  if (ap->ops->freeze)
1128  ap->ops->freeze(ap);
1129 
1130  ap->pflags |= ATA_PFLAG_FROZEN;
1131 
1132  DPRINTK("ata%u port frozen\n", ap->print_id);
1133 }
1134 
1149 int ata_port_freeze(struct ata_port *ap)
1150 {
1151  int nr_aborted;
1152 
1153  WARN_ON(!ap->ops->error_handler);
1154 
1155  __ata_port_freeze(ap);
1156  nr_aborted = ata_port_abort(ap);
1157 
1158  return nr_aborted;
1159 }
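/*
 * Editor's note: a sketch (not part of the original file) of how an LLD
 * interrupt handler typically reacts to a fatal error, in the style of
 * ahci's error handling (MY_IRQ_FATAL is a hypothetical status bit):
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *	if (irq_stat & MY_IRQ_FATAL) {
 *		ehi->err_mask |= AC_ERR_HSM;
 *		ehi->action |= ATA_EH_RESET;
 *		ata_port_freeze(ap);	// aborts in-flight qcs, schedules EH
 *	}
 */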
1160 
1174 int sata_async_notification(struct ata_port *ap)
1175 {
1176  u32 sntf;
1177  int rc;
1178 
1179  if (!(ap->flags & ATA_FLAG_AN))
1180  return 0;
1181 
1182  rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1183  if (rc == 0)
1184  sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1185 
1186  if (!sata_pmp_attached(ap) || rc) {
1187  /* PMP is not attached or SNTF is not available */
1188  if (!sata_pmp_attached(ap)) {
1189  /* PMP is not attached. Check whether ATAPI
1190  * AN is configured. If so, notify media
1191  * change.
1192  */
1193  struct ata_device *dev = ap->link.device;
1194 
1195  if ((dev->class == ATA_DEV_ATAPI) &&
1196  (dev->flags & ATA_DFLAG_AN))
1197  ata_scsi_media_change_notify(dev);
1198  return 0;
1199  } else {
1200  /* PMP is attached but SNTF is not available.
1201  * ATAPI async media change notification is
1202  * not used. The PMP must be reporting PHY
1203  * status change, schedule EH.
1204  */
1205  ata_port_schedule_eh(ap);
1206  return 1;
1207  }
1208  } else {
1209  /* PMP is attached and SNTF is available */
1210  struct ata_link *link;
1211 
1212  /* check and notify ATAPI AN */
1213  ata_for_each_link(link, ap, EDGE) {
1214  if (!(sntf & (1 << link->pmp)))
1215  continue;
1216 
1217  if ((link->device->class == ATA_DEV_ATAPI) &&
1218  (link->device->flags & ATA_DFLAG_AN))
1219  ata_scsi_media_change_notify(link->device);
1220  }
1221 
1222  /* If PMP is reporting that PHY status of some
1223  * downstream ports has changed, schedule EH.
1224  */
1225  if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
1226  ata_port_schedule_eh(ap);
1227  return 1;
1228  }
1229 
1230  return 0;
1231  }
1232 }
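/*
 * Editor's note: a usage sketch (not part of the original file).  LLDs
 * call this from their interrupt handler when a Set Device Bits FIS with
 * the notification bit arrives; ahci does roughly:
 *
 *	if (irq_stat & PORT_IRQ_SDB_FIS)
 *		sata_async_notification(ap);
 */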
1233 
1243 void ata_eh_freeze_port(struct ata_port *ap)
1244 {
1245  unsigned long flags;
1246 
1247  if (!ap->ops->error_handler)
1248  return;
1249 
1250  spin_lock_irqsave(ap->lock, flags);
1251  __ata_port_freeze(ap);
1252  spin_unlock_irqrestore(ap->lock, flags);
1253 }
1254 
1264 void ata_eh_thaw_port(struct ata_port *ap)
1265 {
1266  unsigned long flags;
1267 
1268  if (!ap->ops->error_handler)
1269  return;
1270 
1271  spin_lock_irqsave(ap->lock, flags);
1272 
1273  ap->pflags &= ~ATA_PFLAG_FROZEN;
1274 
1275  if (ap->ops->thaw)
1276  ap->ops->thaw(ap);
1277 
1278  spin_unlock_irqrestore(ap->lock, flags);
1279 
1280  DPRINTK("ata%u port thawed\n", ap->print_id);
1281 }
1282 
1283 static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1284 {
1285  /* nada */
1286 }
1287 
1288 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1289 {
1290  struct ata_port *ap = qc->ap;
1291  struct scsi_cmnd *scmd = qc->scsicmd;
1292  unsigned long flags;
1293 
1294  spin_lock_irqsave(ap->lock, flags);
1295  qc->scsidone = ata_eh_scsidone;
1296  __ata_qc_complete(qc);
1297  WARN_ON(ata_tag_valid(qc->tag));
1298  spin_unlock_irqrestore(ap->lock, flags);
1299 
1300  scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1301 }
1302 
1310 void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1311 {
1312  struct scsi_cmnd *scmd = qc->scsicmd;
1313  scmd->retries = scmd->allowed;
1314  __ata_eh_qc_complete(qc);
1315 }
1316 
1328 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1329 {
1330  struct scsi_cmnd *scmd = qc->scsicmd;
1331  if (!qc->err_mask && scmd->retries)
1332  scmd->retries--;
1333  __ata_eh_qc_complete(qc);
1334 }
1335 
1345 void ata_dev_disable(struct ata_device *dev)
1346 {
1347  if (!ata_dev_enabled(dev))
1348  return;
1349 
1350  if (ata_msg_drv(dev->link->ap))
1351  ata_dev_warn(dev, "disabled\n");
1352  ata_acpi_on_disable(dev);
1353  ata_down_xfermask_limit(dev, ATA_DNXFER_PIO | ATA_DNXFER_QUIET);
1354  dev->class++;
1355 
1356  /* From now till the next successful probe, ering is used to
1357  * track probe failures. Clear accumulated device error info.
1358  */
1359  ata_ering_clear(&dev->ering);
1360 }
1361 
1371 static void ata_eh_detach_dev(struct ata_device *dev)
1372 {
1373  struct ata_link *link = dev->link;
1374  struct ata_port *ap = link->ap;
1375  struct ata_eh_context *ehc = &link->eh_context;
1376  unsigned long flags;
1377 
1378  ata_dev_disable(dev);
1379 
1380  spin_lock_irqsave(ap->lock, flags);
1381 
1382  dev->flags &= ~ATA_DFLAG_DETACH;
1383 
1384  if (ata_scsi_offline_dev(dev)) {
1385  dev->flags |= ATA_DFLAG_DETACHED;
1386  ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1387  }
1388 
1389  /* clear per-dev EH info */
1390  ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1391  ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
1392  ehc->saved_xfer_mode[dev->devno] = 0;
1393  ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1394 
1395  spin_unlock_irqrestore(ap->lock, flags);
1396 }
1397 
1411 void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1412  unsigned int action)
1413 {
1414  struct ata_port *ap = link->ap;
1415  struct ata_eh_info *ehi = &link->eh_info;
1416  struct ata_eh_context *ehc = &link->eh_context;
1417  unsigned long flags;
1418 
1419  spin_lock_irqsave(ap->lock, flags);
1420 
1421  ata_eh_clear_action(link, dev, ehi, action);
1422 
1423  /* About to take EH action, set RECOVERED. Ignore actions on
1424  * slave links as master will do them again.
1425  */
1426  if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1427  ap->pflags |= ATA_PFLAG_RECOVERED;
1428 
1429  spin_unlock_irqrestore(ap->lock, flags);
1430 }
1431 
1444 void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1445  unsigned int action)
1446 {
1447  struct ata_eh_context *ehc = &link->eh_context;
1448 
1449  ata_eh_clear_action(link, dev, &ehc->i, action);
1450 }
1451 
1466 static const char *ata_err_string(unsigned int err_mask)
1467 {
1468  if (err_mask & AC_ERR_HOST_BUS)
1469  return "host bus error";
1470  if (err_mask & AC_ERR_ATA_BUS)
1471  return "ATA bus error";
1472  if (err_mask & AC_ERR_TIMEOUT)
1473  return "timeout";
1474  if (err_mask & AC_ERR_HSM)
1475  return "HSM violation";
1476  if (err_mask & AC_ERR_SYSTEM)
1477  return "internal error";
1478  if (err_mask & AC_ERR_MEDIA)
1479  return "media error";
1480  if (err_mask & AC_ERR_INVALID)
1481  return "invalid argument";
1482  if (err_mask & AC_ERR_DEV)
1483  return "device error";
1484  return "unknown error";
1485 }
1486 
1503 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1504  u8 page, void *buf, unsigned int sectors)
1505 {
1506  struct ata_taskfile tf;
1507  unsigned int err_mask;
1508 
1509  DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1510 
1511  ata_tf_init(dev, &tf);
1512  tf.command = ATA_CMD_READ_LOG_EXT;
1513  tf.lbal = log;
1514  tf.lbam = page;
1515  tf.nsect = sectors;
1516  tf.hob_nsect = sectors >> 8;
1517  tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1518  tf.protocol = ATA_PROT_PIO;
1519 
1520  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1521  buf, sectors * ATA_SECT_SIZE, 0);
1522 
1523  DPRINTK("EXIT, err_mask=%x\n", err_mask);
1524  return err_mask;
1525 }
1526 
1542 static int ata_eh_read_log_10h(struct ata_device *dev,
1543  int *tag, struct ata_taskfile *tf)
1544 {
1545  u8 *buf = dev->link->ap->sector_buf;
1546  unsigned int err_mask;
1547  u8 csum;
1548  int i;
1549 
1550  err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
1551  if (err_mask)
1552  return -EIO;
1553 
1554  csum = 0;
1555  for (i = 0; i < ATA_SECT_SIZE; i++)
1556  csum += buf[i];
1557  if (csum)
1558  ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
1559  csum);
1560 
1561  if (buf[0] & 0x80)
1562  return -ENOENT;
1563 
1564  *tag = buf[0] & 0x1f;
1565 
1566  tf->command = buf[2];
1567  tf->feature = buf[3];
1568  tf->lbal = buf[4];
1569  tf->lbam = buf[5];
1570  tf->lbah = buf[6];
1571  tf->device = buf[7];
1572  tf->hob_lbal = buf[8];
1573  tf->hob_lbam = buf[9];
1574  tf->hob_lbah = buf[10];
1575  tf->nsect = buf[12];
1576  tf->hob_nsect = buf[13];
1577 
1578  return 0;
1579 }
1580 
1594 static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1595 {
1596  u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1597  struct ata_taskfile tf;
1598  unsigned int err_mask;
1599 
1600  ata_tf_init(dev, &tf);
1601 
1602  tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1603  tf.command = ATA_CMD_PACKET;
1604  tf.protocol = ATAPI_PROT_NODATA;
1605 
1606  err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1607  if (err_mask == AC_ERR_DEV)
1608  *r_sense_key = tf.feature >> 4;
1609  return err_mask;
1610 }
1611 
1627 static unsigned int atapi_eh_request_sense(struct ata_device *dev,
1628  u8 *sense_buf, u8 dfl_sense_key)
1629 {
1630  u8 cdb[ATAPI_CDB_LEN] =
1631  { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
1632  struct ata_port *ap = dev->link->ap;
1633  struct ata_taskfile tf;
1634 
1635  DPRINTK("ATAPI request sense\n");
1636 
1637  /* FIXME: is this needed? */
1638  memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1639 
1640  /* initialize sense_buf with the error register,
1641  * for the case where they are -not- overwritten
1642  */
1643  sense_buf[0] = 0x70;
1644  sense_buf[2] = dfl_sense_key;
1645 
1646  /* some devices time out if garbage left in tf */
1647  ata_tf_init(dev, &tf);
1648 
1650  tf.command = ATA_CMD_PACKET;
1651 
1652  /* is it pointless to prefer PIO for "safety reasons"? */
1653  if (ap->flags & ATA_FLAG_PIO_DMA) {
1654  tf.protocol = ATAPI_PROT_DMA;
1655  tf.feature |= ATAPI_PKT_DMA;
1656  } else {
1657  tf.protocol = ATAPI_PROT_PIO;
1658  tf.lbam = SCSI_SENSE_BUFFERSIZE;
1659  tf.lbah = 0;
1660  }
1661 
1662  return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
1663  sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1664 }
1665 
1676 static void ata_eh_analyze_serror(struct ata_link *link)
1677 {
1678  struct ata_eh_context *ehc = &link->eh_context;
1679  u32 serror = ehc->i.serror;
1680  unsigned int err_mask = 0, action = 0;
1681  u32 hotplug_mask;
1682 
1683  if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1684  err_mask |= AC_ERR_ATA_BUS;
1685  action |= ATA_EH_RESET;
1686  }
1687  if (serror & SERR_PROTOCOL) {
1688  err_mask |= AC_ERR_HSM;
1689  action |= ATA_EH_RESET;
1690  }
1691  if (serror & SERR_INTERNAL) {
1692  err_mask |= AC_ERR_SYSTEM;
1693  action |= ATA_EH_RESET;
1694  }
1695 
1696  /* Determine whether a hotplug event has occurred. Both
1697  * SError.N/X are considered hotplug events for enabled or
1698  * host links. For disabled PMP links, only N bit is
1699  * considered as X bit is left at 1 for link plugging.
1700  */
1701  if (link->lpm_policy > ATA_LPM_MAX_POWER)
1702  hotplug_mask = 0; /* hotplug doesn't work w/ LPM */
1703  else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1704  hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1705  else
1706  hotplug_mask = SERR_PHYRDY_CHG;
1707 
1708  if (serror & hotplug_mask)
1709  ata_ehi_hotplugged(&ehc->i);
1710 
1711  ehc->i.err_mask |= err_mask;
1712  ehc->i.action |= action;
1713 }
1714 
1727 void ata_eh_analyze_ncq_error(struct ata_link *link)
1728 {
1729  struct ata_port *ap = link->ap;
1730  struct ata_eh_context *ehc = &link->eh_context;
1731  struct ata_device *dev = link->device;
1732  struct ata_queued_cmd *qc;
1733  struct ata_taskfile tf;
1734  int tag, rc;
1735 
1736  /* if frozen, we can't do much */
1737  if (ap->pflags & ATA_PFLAG_FROZEN)
1738  return;
1739 
1740  /* is it NCQ device error? */
1741  if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1742  return;
1743 
1744  /* has LLDD analyzed already? */
1745  for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1746  qc = __ata_qc_from_tag(ap, tag);
1747 
1748  if (!(qc->flags & ATA_QCFLAG_FAILED))
1749  continue;
1750 
1751  if (qc->err_mask)
1752  return;
1753  }
1754 
1755  /* okay, this error is ours */
1756  memset(&tf, 0, sizeof(tf));
1757  rc = ata_eh_read_log_10h(dev, &tag, &tf);
1758  if (rc) {
1759  ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
1760  rc);
1761  return;
1762  }
1763 
1764  if (!(link->sactive & (1 << tag))) {
1765  ata_link_err(link, "log page 10h reported inactive tag %d\n",
1766  tag);
1767  return;
1768  }
1769 
1770  /* we've got the perpetrator, condemn it */
1771  qc = __ata_qc_from_tag(ap, tag);
1772  memcpy(&qc->result_tf, &tf, sizeof(tf));
1773  qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1774  qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1775  ehc->i.err_mask &= ~AC_ERR_DEV;
1776 }
1777 
1793 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1794  const struct ata_taskfile *tf)
1795 {
1796  unsigned int tmp, action = 0;
1797  u8 stat = tf->command, err = tf->feature;
1798 
1799  if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1800  qc->err_mask |= AC_ERR_HSM;
1801  return ATA_EH_RESET;
1802  }
1803 
1804  if (stat & (ATA_ERR | ATA_DF))
1805  qc->err_mask |= AC_ERR_DEV;
1806  else
1807  return 0;
1808 
1809  switch (qc->dev->class) {
1810  case ATA_DEV_ATA:
1811  if (err & ATA_ICRC)
1812  qc->err_mask |= AC_ERR_ATA_BUS;
1813  if (err & ATA_UNC)
1814  qc->err_mask |= AC_ERR_MEDIA;
1815  if (err & ATA_IDNF)
1816  qc->err_mask |= AC_ERR_INVALID;
1817  break;
1818 
1819  case ATA_DEV_ATAPI:
1820  if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1821  tmp = atapi_eh_request_sense(qc->dev,
1822  qc->scsicmd->sense_buffer,
1823  qc->result_tf.feature >> 4);
1824  if (!tmp) {
1825  /* ATA_QCFLAG_SENSE_VALID is used to
1826  * tell atapi_qc_complete() that sense
1827  * data is already valid.
1828  *
1829  * TODO: interpret sense data and set
1830  * appropriate err_mask.
1831  */
1832  qc->flags |= ATA_QCFLAG_SENSE_VALID;
1833  } else
1834  qc->err_mask |= tmp;
1835  }
1836  }
1837 
1838  if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1839  action |= ATA_EH_RESET;
1840 
1841  return action;
1842 }
1843 
1844 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1845  int *xfer_ok)
1846 {
1847  int base = 0;
1848 
1849  if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1850  *xfer_ok = 1;
1851 
1852  if (!*xfer_ok)
1853  base = ATA_ECAT_DUBIOUS_NONE;
1854 
1855  if (err_mask & AC_ERR_ATA_BUS)
1856  return base + ATA_ECAT_ATA_BUS;
1857 
1858  if (err_mask & AC_ERR_TIMEOUT)
1859  return base + ATA_ECAT_TOUT_HSM;
1860 
1861  if (eflags & ATA_EFLAG_IS_IO) {
1862  if (err_mask & AC_ERR_HSM)
1863  return base + ATA_ECAT_TOUT_HSM;
1864  if ((err_mask &
1865  (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1866  return base + ATA_ECAT_UNK_DEV;
1867  }
1868 
1869  return 0;
1870 }
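/*
 * Editor's worked example (not part of the original file): for a normal
 * I/O command, err_mask == AC_ERR_MEDIA | AC_ERR_DEV falls through to
 * category 0 (a real media error isn't the interface's fault), while a
 * bare AC_ERR_DEV yields ATA_ECAT_UNK_DEV.  If the error occurred before
 * any transfer succeeded (ATA_EFLAG_DUBIOUS_XFER), the same error lands
 * in the corresponding ATA_ECAT_DUBIOUS_* category instead.
 */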
1871 
1872 struct speed_down_verdict_arg {
1873  u64 since;
1874  int xfer_ok;
1875  int nr_errors[ATA_ECAT_NR];
1876 };
1877 
1878 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1879 {
1880  struct speed_down_verdict_arg *arg = void_arg;
1881  int cat;
1882 
1883  if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1884  return -1;
1885 
1886  cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1887  &arg->xfer_ok);
1888  arg->nr_errors[cat]++;
1889 
1890  return 0;
1891 }
1892 
1950 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1951 {
1952  const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
1953  u64 j64 = get_jiffies_64();
1954  struct speed_down_verdict_arg arg;
1955  unsigned int verdict = 0;
1956 
1957  /* scan past 5 mins of error history */
1958  memset(&arg, 0, sizeof(arg));
1959  arg.since = j64 - min(j64, j5mins);
1960  ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1961 
1962  if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
1963  arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
1964  verdict |= ATA_EH_SPDN_SPEED_DOWN |
1965  ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
1966 
1967  if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
1968  arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
1969  verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
1970 
1971  if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1972  arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1973  arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1974  verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
1975 
1976  /* scan past 10 mins of error history */
1977  memset(&arg, 0, sizeof(arg));
1978  arg.since = j64 - min(j64, j10mins);
1979  ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1980 
1981  if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1982  arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
1983  verdict |= ATA_EH_SPDN_NCQ_OFF;
1984 
1985  if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1986  arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1987  arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1988  verdict |= ATA_EH_SPDN_SPEED_DOWN;
1989 
1990  return verdict;
1991 }
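/*
 * Editor's worked example (not part of the original file): four ATA-bus
 * errors during I/O inside five minutes give
 * arg.nr_errors[ATA_ECAT_ATA_BUS] == 4, which also exceeds the 10-minute
 * threshold of 3 above, so the verdict includes ATA_EH_SPDN_SPEED_DOWN:
 * EH will first try to lower the SATA link speed, then the transfer mode.
 */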
1992 
2010 static unsigned int ata_eh_speed_down(struct ata_device *dev,
2011  unsigned int eflags, unsigned int err_mask)
2012 {
2013  struct ata_link *link = ata_dev_phys_link(dev);
2014  int xfer_ok = 0;
2015  unsigned int verdict;
2016  unsigned int action = 0;
2017 
2018  /* don't bother if Cat-0 error */
2019  if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
2020  return 0;
2021 
2022  /* record error and determine whether speed down is necessary */
2023  ata_ering_record(&dev->ering, eflags, err_mask);
2024  verdict = ata_eh_speed_down_verdict(dev);
2025 
2026  /* turn off NCQ? */
2027  if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
2028  (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
2029  ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
2030  dev->flags |= ATA_DFLAG_NCQ_OFF;
2031  ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
2032  goto done;
2033  }
2034 
2035  /* speed down? */
2036  if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
2037  /* speed down SATA link speed if possible */
2038  if (sata_down_spd_limit(link, 0) == 0) {
2039  action |= ATA_EH_RESET;
2040  goto done;
2041  }
2042 
2043  /* lower transfer mode */
2044  if (dev->spdn_cnt < 2) {
2045  static const int dma_dnxfer_sel[] =
2046  { ATA_DNXFER_DMA, ATA_DNXFER_40C };
2047  static const int pio_dnxfer_sel[] =
2048  { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
2049  int sel;
2050 
2051  if (dev->xfer_shift != ATA_SHIFT_PIO)
2052  sel = dma_dnxfer_sel[dev->spdn_cnt];
2053  else
2054  sel = pio_dnxfer_sel[dev->spdn_cnt];
2055 
2056  dev->spdn_cnt++;
2057 
2058  if (ata_down_xfermask_limit(dev, sel) == 0) {
2059  action |= ATA_EH_RESET;
2060  goto done;
2061  }
2062  }
2063  }
2064 
2065  /* Fall back to PIO? Slowing down to PIO is meaningless for
2066  * SATA ATA devices. Consider it only for PATA and SATAPI.
2067  */
2068  if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
2069  (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
2070  (dev->xfer_shift != ATA_SHIFT_PIO)) {
2071  if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
2072  dev->spdn_cnt = 0;
2073  action |= ATA_EH_RESET;
2074  goto done;
2075  }
2076  }
2077 
2078  return 0;
2079  done:
2080  /* device has been slowed down, blow error history */
2081  if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
2082  ata_ering_clear(&dev->ering);
2083  return action;
2084 }
2085 
2095 static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
2096 {
2097  if (qc->err_mask & AC_ERR_MEDIA)
2098  return 0; /* don't retry media errors */
2099  if (qc->flags & ATA_QCFLAG_IO)
2100  return 1; /* otherwise retry anything from fs stack */
2101  if (qc->err_mask & AC_ERR_INVALID)
2102  return 0; /* don't retry these */
2103  return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */
2104 }
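/*
 * Editor's worked example (not part of the original file): filesystem
 * I/O (ATA_QCFLAG_IO) is retried for anything except a media error; a
 * non-I/O command failing with AC_ERR_INVALID (e.g. an address beyond the
 * device's capacity) is not retried; a non-I/O command failing with
 * exactly AC_ERR_DEV is not retried either, since the device rejected it
 * for cause.
 */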
2105 
2117 static void ata_eh_link_autopsy(struct ata_link *link)
2118 {
2119  struct ata_port *ap = link->ap;
2120  struct ata_eh_context *ehc = &link->eh_context;
2121  struct ata_device *dev;
2122  unsigned int all_err_mask = 0, eflags = 0;
2123  int tag;
2124  u32 serror;
2125  int rc;
2126 
2127  DPRINTK("ENTER\n");
2128 
2129  if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2130  return;
2131 
2132  /* obtain and analyze SError */
2133  rc = sata_scr_read(link, SCR_ERROR, &serror);
2134  if (rc == 0) {
2135  ehc->i.serror |= serror;
2136  ata_eh_analyze_serror(link);
2137  } else if (rc != -EOPNOTSUPP) {
2138  /* SError read failed, force reset and probing */
2139  ehc->i.probe_mask |= ATA_ALL_DEVICES;
2140  ehc->i.action |= ATA_EH_RESET;
2141  ehc->i.err_mask |= AC_ERR_OTHER;
2142  }
2143 
2144  /* analyze NCQ failure */
2145  ata_eh_analyze_ncq_error(link);
2146 
2147  /* any real error trumps AC_ERR_OTHER */
2148  if (ehc->i.err_mask & ~AC_ERR_OTHER)
2149  ehc->i.err_mask &= ~AC_ERR_OTHER;
2150 
2151  all_err_mask |= ehc->i.err_mask;
2152 
2153  for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2154  struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2155 
2156  if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2157  ata_dev_phys_link(qc->dev) != link)
2158  continue;
2159 
2160  /* inherit upper level err_mask */
2161  qc->err_mask |= ehc->i.err_mask;
2162 
2163  /* analyze TF */
2164  ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2165 
2166  /* DEV errors are probably spurious in case of ATA_BUS error */
2167  if (qc->err_mask & AC_ERR_ATA_BUS)
2168  qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2169  AC_ERR_INVALID);
2170 
2171  /* any real error trumps unknown error */
2172  if (qc->err_mask & ~AC_ERR_OTHER)
2173  qc->err_mask &= ~AC_ERR_OTHER;
2174 
2175  /* SENSE_VALID trumps dev/unknown error and revalidation */
2176  if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2177  qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2178 
2179  /* determine whether the command is worth retrying */
2180  if (ata_eh_worth_retry(qc))
2181  qc->flags |= ATA_QCFLAG_RETRY;
2182 
2183  /* accumulate error info */
2184  ehc->i.dev = qc->dev;
2185  all_err_mask |= qc->err_mask;
2186  if (qc->flags & ATA_QCFLAG_IO)
2187  eflags |= ATA_EFLAG_IS_IO;
2188  }
2189 
2190  /* enforce default EH actions */
2191  if (ap->pflags & ATA_PFLAG_FROZEN ||
2192  all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2193  ehc->i.action |= ATA_EH_RESET;
2194  else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2195  (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2196  ehc->i.action |= ATA_EH_REVALIDATE;
2197 
2198  /* If we have offending qcs and the associated failed device,
2199  * perform per-dev EH action only on the offending device.
2200  */
2201  if (ehc->i.dev) {
2202  ehc->i.dev_action[ehc->i.dev->devno] |=
2203  ehc->i.action & ATA_EH_PERDEV_MASK;
2204  ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2205  }
2206 
2207  /* propagate timeout to host link */
2208  if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2209  ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2210 
2211  /* record error and consider speeding down */
2212  dev = ehc->i.dev;
2213  if (!dev && ((ata_link_max_devices(link) == 1 &&
2214  ata_dev_enabled(link->device))))
2215  dev = link->device;
2216 
2217  if (dev) {
2218  if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2219  eflags |= ATA_EFLAG_DUBIOUS_XFER;
2220  ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2221  }
2222 
2223  DPRINTK("EXIT\n");
2224 }
2225 
2236 void ata_eh_autopsy(struct ata_port *ap)
2237 {
2238  struct ata_link *link;
2239 
2240  ata_for_each_link(link, ap, EDGE)
2241  ata_eh_link_autopsy(link);
2242 
2243  /* Handle the frigging slave link. Autopsy is done similarly
2244  * but actions and flags are transferred over to the master
2245  * link and handled from there.
2246  */
2247  if (ap->slave_link) {
2248  struct ata_eh_context *mehc = &ap->link.eh_context;
2249  struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2250 
2251  /* transfer control flags from master to slave */
2252  sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2253 
2254  /* perform autopsy on the slave link */
2255  ata_eh_link_autopsy(ap->slave_link);
2256 
2257  /* transfer actions from slave to master and clear slave */
2258  ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2259  mehc->i.action |= sehc->i.action;
2260  mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2261  mehc->i.flags |= sehc->i.flags;
2262  ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2263  }
2264 
2265  /* Autopsy of fanout ports can affect host link autopsy.
2266  * Perform host link autopsy last.
2267  */
2268  if (sata_pmp_attached(ap))
2269  ata_eh_link_autopsy(&ap->link);
2270 }
2271 
2282 const char *ata_get_cmd_descript(u8 command)
2283 {
2284 #ifdef CONFIG_ATA_VERBOSE_ERROR
2285  static const struct
2286  {
2287  u8 command;
2288  const char *text;
2289  } cmd_descr[] = {
2290  { ATA_CMD_DEV_RESET, "DEVICE RESET" },
2291  { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
2292  { ATA_CMD_STANDBY, "STANDBY" },
2293  { ATA_CMD_IDLE, "IDLE" },
2294  { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
2295  { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
2296  { ATA_CMD_NOP, "NOP" },
2297  { ATA_CMD_FLUSH, "FLUSH CACHE" },
2298  { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
2299  { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
2300  { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
2301  { ATA_CMD_SERVICE, "SERVICE" },
2302  { ATA_CMD_READ, "READ DMA" },
2303  { ATA_CMD_READ_EXT, "READ DMA EXT" },
2304  { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
2305  { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
2306  { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
2307  { ATA_CMD_WRITE, "WRITE DMA" },
2308  { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
2309  { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
2310  { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
2311  { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2312  { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
2313  { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2314  { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
2315  { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
2316  { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
2317  { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
2318  { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
2319  { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
2320  { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
2321  { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
2322  { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
2323  { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
2324  { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
2325  { ATA_CMD_SET_FEATURES, "SET FEATURES" },
2326  { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
2327  { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
2328  { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
2329  { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
2330  { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
2331  { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
2332  { ATA_CMD_SLEEP, "SLEEP" },
2333  { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
2334  { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
2335  { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
2336  { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
2337  { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
2338  { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
2339  { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
2340  { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
2341  { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
2342  { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
2343  { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
2344  { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
2345  { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
2346  { ATA_CMD_PMP_READ, "READ BUFFER" },
2347  { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
2348  { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
2349  { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
2350  { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
2351  { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
2352  { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
2353  { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
2354  { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
2355  { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
2356  { ATA_CMD_SMART, "SMART" },
2357  { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2358  { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
2359  { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
2360  { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2361  { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
2362  { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
2363  { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
2364  { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
2365  { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
2366  { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
2367  { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
2368  { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
2369  { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
2370  { ATA_CMD_RESTORE, "RECALIBRATE" },
2371  { 0, NULL } /* terminate list */
2372  };
2373 
2374  unsigned int i;
2375  for (i = 0; cmd_descr[i].text; i++)
2376  if (cmd_descr[i].command == command)
2377  return cmd_descr[i].text;
2378 #endif
2379 
2380  return NULL;
2381 }
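/*
 * Editor's note: a usage sketch (not part of the original file):
 *
 *	const char *descr = ata_get_cmd_descript(ATA_CMD_FPDMA_READ);
 *	// "READ FPDMA QUEUED" with CONFIG_ATA_VERBOSE_ERROR=y, else NULL
 */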
2382 
2392 static void ata_eh_link_report(struct ata_link *link)
2393 {
2394  struct ata_port *ap = link->ap;
2395  struct ata_eh_context *ehc = &link->eh_context;
2396  const char *frozen, *desc;
2397  char tries_buf[6];
2398  int tag, nr_failed = 0;
2399 
2400  if (ehc->i.flags & ATA_EHI_QUIET)
2401  return;
2402 
2403  desc = NULL;
2404  if (ehc->i.desc[0] != '\0')
2405  desc = ehc->i.desc;
2406 
2407  for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2408  struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2409 
2410  if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2411  ata_dev_phys_link(qc->dev) != link ||
2412  ((qc->flags & ATA_QCFLAG_QUIET) &&
2413  qc->err_mask == AC_ERR_DEV))
2414  continue;
2415  if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2416  continue;
2417 
2418  nr_failed++;
2419  }
2420 
2421  if (!nr_failed && !ehc->i.err_mask)
2422  return;
2423 
2424  frozen = "";
2425  if (ap->pflags & ATA_PFLAG_FROZEN)
2426  frozen = " frozen";
2427 
2428  memset(tries_buf, 0, sizeof(tries_buf));
2429  if (ap->eh_tries < ATA_EH_MAX_TRIES)
2430  snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2431  ap->eh_tries);
2432 
2433  if (ehc->i.dev) {
2434  ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2435  "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2436  ehc->i.err_mask, link->sactive, ehc->i.serror,
2437  ehc->i.action, frozen, tries_buf);
2438  if (desc)
2439  ata_dev_err(ehc->i.dev, "%s\n", desc);
2440  } else {
2441  ata_link_err(link, "exception Emask 0x%x "
2442  "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2443  ehc->i.err_mask, link->sactive, ehc->i.serror,
2444  ehc->i.action, frozen, tries_buf);
2445  if (desc)
2446  ata_link_err(link, "%s\n", desc);
2447  }
2448 
2449 #ifdef CONFIG_ATA_VERBOSE_ERROR
2450  if (ehc->i.serror)
2451  ata_link_err(link,
2452  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2453  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2454  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2455  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2456  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2457  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2458  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2459  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2460  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2461  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2462  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2463  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2464  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2465  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2466  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2467  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2468  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2469  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2470 #endif
2471 
2472  for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2473  struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2474  struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2475  const u8 *cdb = qc->cdb;
2476  char data_buf[20] = "";
2477  char cdb_buf[70] = "";
2478 
2479  if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2480  ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2481  continue;
2482 
2483  if (qc->dma_dir != DMA_NONE) {
2484  static const char *dma_str[] = {
2485  [DMA_BIDIRECTIONAL] = "bidi",
2486  [DMA_TO_DEVICE] = "out",
2487  [DMA_FROM_DEVICE] = "in",
2488  };
2489  static const char *prot_str[] = {
2490  [ATA_PROT_PIO] = "pio",
2491  [ATA_PROT_DMA] = "dma",
2492  [ATA_PROT_NCQ] = "ncq",
2493  [ATAPI_PROT_PIO] = "pio",
2494  [ATAPI_PROT_DMA] = "dma",
2495  };
2496 
2497  snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2498  prot_str[qc->tf.protocol], qc->nbytes,
2499  dma_str[qc->dma_dir]);
2500  }
2501 
2502  if (ata_is_atapi(qc->tf.protocol)) {
2503  if (qc->scsicmd)
2504  scsi_print_command(qc->scsicmd);
2505  else
2506  snprintf(cdb_buf, sizeof(cdb_buf),
2507  "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
2508  "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
2509  cdb[0], cdb[1], cdb[2], cdb[3],
2510  cdb[4], cdb[5], cdb[6], cdb[7],
2511  cdb[8], cdb[9], cdb[10], cdb[11],
2512  cdb[12], cdb[13], cdb[14], cdb[15]);
2513  } else {
2514  const char *descr = ata_get_cmd_descript(cmd->command);
2515  if (descr)
2516  ata_dev_err(qc->dev, "failed command: %s\n",
2517  descr);
2518  }
2519 
2520  ata_dev_err(qc->dev,
2521  "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2522  "tag %d%s\n %s"
2523  "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2524  "Emask 0x%x (%s)%s\n",
2525  cmd->command, cmd->feature, cmd->nsect,
2526  cmd->lbal, cmd->lbam, cmd->lbah,
2527  cmd->hob_feature, cmd->hob_nsect,
2528  cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2529  cmd->device, qc->tag, data_buf, cdb_buf,
2530  res->command, res->feature, res->nsect,
2531  res->lbal, res->lbam, res->lbah,
2532  res->hob_feature, res->hob_nsect,
2533  res->hob_lbal, res->hob_lbam, res->hob_lbah,
2534  res->device, qc->err_mask, ata_err_string(qc->err_mask),
2535  qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2536 
2537 #ifdef CONFIG_ATA_VERBOSE_ERROR
2538  if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2539  ATA_ERR)) {
2540  if (res->command & ATA_BUSY)
2541  ata_dev_err(qc->dev, "status: { Busy }\n");
2542  else
2543  ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
2544  res->command & ATA_DRDY ? "DRDY " : "",
2545  res->command & ATA_DF ? "DF " : "",
2546  res->command & ATA_DRQ ? "DRQ " : "",
2547  res->command & ATA_ERR ? "ERR " : "");
2548  }
2549 
2550  if (cmd->command != ATA_CMD_PACKET &&
2551  (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2552  ATA_ABORTED)))
2553  ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
2554  res->feature & ATA_ICRC ? "ICRC " : "",
2555  res->feature & ATA_UNC ? "UNC " : "",
2556  res->feature & ATA_IDNF ? "IDNF " : "",
2557  res->feature & ATA_ABORTED ? "ABRT " : "");
2558 #endif
2559  }
2560 }
2561 
2571 void ata_eh_report(struct ata_port *ap)
2572 {
2573  struct ata_link *link;
2574 
2575  ata_for_each_link(link, ap, HOST_FIRST)
2576  ata_eh_link_report(link);
2577 }
2578 
2579 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2580  unsigned int *classes, unsigned long deadline,
2581  bool clear_classes)
2582 {
2583  struct ata_device *dev;
2584 
2585  if (clear_classes)
2586  ata_for_each_dev(dev, link, ALL)
2587  classes[dev->devno] = ATA_DEV_UNKNOWN;
2588 
2589  return reset(link, classes, deadline);
2590 }
2591 
2592 static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
2593 {
2594  if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2595  return 0;
2596  if (rc == -EAGAIN)
2597  return 1;
2598  if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2599  return 1;
2600  return 0;
2601 }
2602 
2603 int ata_eh_reset(struct ata_link *link, int classify,
2604  ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2605  ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2606 {
2607  struct ata_port *ap = link->ap;
2608  struct ata_link *slave = ap->slave_link;
2609  struct ata_eh_context *ehc = &link->eh_context;
2610  struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2611  unsigned int *classes = ehc->classes;
2612  unsigned int lflags = link->flags;
2613  int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2614  int max_tries = 0, try = 0;
2615  struct ata_link *failed_link;
2616  struct ata_device *dev;
2617  unsigned long deadline, now;
2618  ata_reset_fn_t reset;
2619  unsigned long flags;
2620  u32 sstatus;
2621  int nr_unknown, rc;
2622 
2623  /*
2624  * Prepare to reset
2625  */
2626  while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2627  max_tries++;
2628  if (link->flags & ATA_LFLAG_RST_ONCE)
2629  max_tries = 1;
2630  if (link->flags & ATA_LFLAG_NO_HRST)
2631  hardreset = NULL;
2632  if (link->flags & ATA_LFLAG_NO_SRST)
2633  softreset = NULL;
2634 
2635  /* make sure each reset attempt is at least COOL_DOWN apart */
2636  if (ehc->i.flags & ATA_EHI_DID_RESET) {
2637  now = jiffies;
2638  WARN_ON(time_after(ehc->last_reset, now));
2639  deadline = ata_deadline(ehc->last_reset,
2640  ATA_EH_RESET_COOL_DOWN);
2641  if (time_before(now, deadline))
2642  schedule_timeout_uninterruptible(deadline - now);
2643  }
2644 
2645  spin_lock_irqsave(ap->lock, flags);
2646  ap->pflags |= ATA_PFLAG_RESETTING;
2647  spin_unlock_irqrestore(ap->lock, flags);
2648 
2649  ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2650 
2651  ata_for_each_dev(dev, link, ALL) {
2652  /* If we issue an SRST then an ATA drive (not ATAPI)
2653  * may change configuration and be in PIO0 timing. If
2654  * we do a hard reset (or are coming from power on)
2655  * this is true for ATA or ATAPI. Until we've set a
2656  * suitable controller mode we should not touch the
2657  * bus as we may be talking too fast.
2658  */
2659  dev->pio_mode = XFER_PIO_0;
2660 
2661  /* If the controller has a pio mode setup function
2662  * then use it to set the chipset to rights. Don't
2663  * touch the DMA setup as that will be dealt with when
2664  * configuring devices.
2665  */
2666  if (ap->ops->set_piomode)
2667  ap->ops->set_piomode(ap, dev);
2668  }
2669 
2670  /* prefer hardreset */
2671  reset = NULL;
2672  ehc->i.action &= ~ATA_EH_RESET;
2673  if (hardreset) {
2674  reset = hardreset;
2675  ehc->i.action |= ATA_EH_HARDRESET;
2676  } else if (softreset) {
2677  reset = softreset;
2678  ehc->i.action |= ATA_EH_SOFTRESET;
2679  }
2680 
2681  if (prereset) {
2682  unsigned long deadline = ata_deadline(jiffies,
2683  ATA_EH_PRERESET_TIMEOUT);
2684 
2685  if (slave) {
2686  sehc->i.action &= ~ATA_EH_RESET;
2687  sehc->i.action |= ehc->i.action;
2688  }
2689 
2690  rc = prereset(link, deadline);
2691 
2692  /* If present, do prereset on slave link too. Reset
2693  * is skipped iff both master and slave links report
2694  * -ENOENT or clear ATA_EH_RESET.
2695  */
2696  if (slave && (rc == 0 || rc == -ENOENT)) {
2697  int tmp;
2698 
2699  tmp = prereset(slave, deadline);
2700  if (tmp != -ENOENT)
2701  rc = tmp;
2702 
2703  ehc->i.action |= sehc->i.action;
2704  }
2705 
2706  if (rc) {
2707  if (rc == -ENOENT) {
2708  ata_link_dbg(link, "port disabled--ignoring\n");
2709  ehc->i.action &= ~ATA_EH_RESET;
2710 
2711  ata_for_each_dev(dev, link, ALL)
2712  classes[dev->devno] = ATA_DEV_NONE;
2713 
2714  rc = 0;
2715  } else
2716  ata_link_err(link,
2717  "prereset failed (errno=%d)\n",
2718  rc);
2719  goto out;
2720  }
2721 
2722  /* prereset() might have cleared ATA_EH_RESET. If so,
2723  * bang classes, thaw and return.
2724  */
2725  if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2726  ata_for_each_dev(dev, link, ALL)
2727  classes[dev->devno] = ATA_DEV_NONE;
2728  if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2729  ata_is_host_link(link))
2730  ata_eh_thaw_port(ap);
2731  rc = 0;
2732  goto out;
2733  }
2734  }
2735 
2736  retry:
2737  /*
2738  * Perform reset
2739  */
2740  if (ata_is_host_link(link))
2741  ata_eh_freeze_port(ap);
2742 
2743  deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2744 
2745  if (reset) {
2746  if (verbose)
2747  ata_link_info(link, "%s resetting link\n",
2748  reset == softreset ? "soft" : "hard");
2749 
2750  /* mark that this EH session started with reset */
2751  ehc->last_reset = jiffies;
2752  if (reset == hardreset)
2753  ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2754  else
2755  ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2756 
2757  rc = ata_do_reset(link, reset, classes, deadline, true);
2758  if (rc && rc != -EAGAIN) {
2759  failed_link = link;
2760  goto fail;
2761  }
2762 
2763  /* hardreset slave link if existent */
2764  if (slave && reset == hardreset) {
2765  int tmp;
2766 
2767  if (verbose)
2768  ata_link_info(slave, "hard resetting link\n");
2769 
2770  ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2771  tmp = ata_do_reset(slave, reset, classes, deadline,
2772  false);
2773  switch (tmp) {
2774  case -EAGAIN:
2775  rc = -EAGAIN;
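  /* fall through */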
2776  case 0:
2777  break;
2778  default:
2779  failed_link = slave;
2780  rc = tmp;
2781  goto fail;
2782  }
2783  }
2784 
2785  /* perform follow-up SRST if necessary */
2786  if (reset == hardreset &&
2787  ata_eh_followup_srst_needed(link, rc)) {
2788  reset = softreset;
2789 
2790  if (!reset) {
2791  ata_link_err(link,
2792  "follow-up softreset required but no softreset available\n");
2793  failed_link = link;
2794  rc = -EINVAL;
2795  goto fail;
2796  }
2797 
2798  ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2799  rc = ata_do_reset(link, reset, classes, deadline, true);
2800  if (rc) {
2801  failed_link = link;
2802  goto fail;
2803  }
2804  }
2805  } else {
2806  if (verbose)
2807  ata_link_info(link,
2808  "no reset method available, skipping reset\n");
2809  if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2810  lflags |= ATA_LFLAG_ASSUME_ATA;
2811  }
2812 
2813  /*
2814  * Post-reset processing
2815  */
2816  ata_for_each_dev(dev, link, ALL) {
2817  /* After the reset, the device state is PIO 0 and the
2818  * controller state is undefined. Reset also wakes up
2819  * drives from sleeping mode.
2820  */
2821  dev->pio_mode = XFER_PIO_0;
2822  dev->flags &= ~ATA_DFLAG_SLEEPING;
2823 
2824  if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2825  continue;
2826 
2827  /* apply class override */
2828  if (lflags & ATA_LFLAG_ASSUME_ATA)
2829  classes[dev->devno] = ATA_DEV_ATA;
2830  else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2831  classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2832  }
2833 
2834  /* record current link speed */
2835  if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2836  link->sata_spd = (sstatus >> 4) & 0xf;
2837  if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2838  slave->sata_spd = (sstatus >> 4) & 0xf;
2839 
2840  /* thaw the port */
2841  if (ata_is_host_link(link))
2842  ata_eh_thaw_port(ap);
2843 
2844  /* postreset() should clear hardware SError. Although SError
2845  * is cleared during link resume, clearing SError here is
2846  * necessary as some PHYs raise hotplug events after SRST.
2847  * This introduces a race condition where hotplug can occur
2848  * between reset and here; it is mitigated by cross checking
2849  * link onlineness and classification result later.
2850  */
2851  if (postreset) {
2852  postreset(link, classes);
2853  if (slave)
2854  postreset(slave, classes);
2855  }
2856 
2857  /*
2858  * Some controllers can't be frozen very well and may set spurious
2859  * error conditions during reset. Clear accumulated error
2860  * information and re-thaw the port if frozen. As reset is the
2861  * final recovery action and we cross check link onlineness against
2862  * device classification later, no hotplug event is lost by this.
2863  */
2864  spin_lock_irqsave(link->ap->lock, flags);
2865  memset(&link->eh_info, 0, sizeof(link->eh_info));
2866  if (slave)
2867  memset(&slave->eh_info, 0, sizeof(slave->eh_info));
2868  ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2869  spin_unlock_irqrestore(link->ap->lock, flags);
2870 
2871  if (ap->pflags & ATA_PFLAG_FROZEN)
2872  ata_eh_thaw_port(ap);
2873 
2874  /*
2875  * Make sure onlineness and classification result correspond.
2876  * Hotplug could have happened during reset and some
2877  * controllers fail to wait while a drive is spinning up after
2878  * being hotplugged, causing misdetection. By cross checking
2879  * link on/offlineness and classification result, those
2880  * conditions can be reliably detected and retried.
2881  */
2882  nr_unknown = 0;
2883  ata_for_each_dev(dev, link, ALL) {
2884  if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2885  if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2886  ata_dev_dbg(dev, "link online but device misclassified\n");
2887  classes[dev->devno] = ATA_DEV_NONE;
2888  nr_unknown++;
2889  }
2890  } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2891  if (ata_class_enabled(classes[dev->devno]))
2892  ata_dev_dbg(dev,
2893  "link offline, clearing class %d to NONE\n",
2894  classes[dev->devno]);
2895  classes[dev->devno] = ATA_DEV_NONE;
2896  } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2897  ata_dev_dbg(dev,
2898  "link status unknown, clearing UNKNOWN to NONE\n");
2899  classes[dev->devno] = ATA_DEV_NONE;
2900  }
2901  }
2902 
2903  if (classify && nr_unknown) {
2904  if (try < max_tries) {
2905  ata_link_warn(link,
2906  "link online but %d devices misclassified, retrying\n",
2907  nr_unknown);
2908  failed_link = link;
2909  rc = -EAGAIN;
2910  goto fail;
2911  }
2912  ata_link_warn(link,
2913  "link online but %d devices misclassified, "
2914  "device detection might fail\n", nr_unknown);
2915  }
2916 
2917  /* reset successful, schedule revalidation */
2918  ata_eh_done(link, NULL, ATA_EH_RESET);
2919  if (slave)
2920  ata_eh_done(slave, NULL, ATA_EH_RESET);
2921  ehc->last_reset = jiffies; /* update to completion time */
2922  ehc->i.action |= ATA_EH_REVALIDATE;
2923  link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */
2924 
2925  rc = 0;
2926  out:
2927  /* clear hotplug flag */
2928  ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2929  if (slave)
2930  sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2931 
2932  spin_lock_irqsave(ap->lock, flags);
2933  ap->pflags &= ~ATA_PFLAG_RESETTING;
2934  spin_unlock_irqrestore(ap->lock, flags);
2935 
2936  return rc;
2937 
2938  fail:
2939  /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2940  if (!ata_is_host_link(link) &&
2941  sata_scr_read(link, SCR_STATUS, &sstatus))
2942  rc = -ERESTART;
2943 
2944  if (try >= max_tries) {
2945  /*
2946  * Thaw host port even if reset failed, so that the port
2947  * can be retried on the next phy event. This risks
2948  * repeated EH runs but seems to be a better tradeoff than
2949  * shutting down a port after a botched hotplug attempt.
2950  */
2951  if (ata_is_host_link(link))
2952  ata_eh_thaw_port(ap);
2953  goto out;
2954  }
2955 
2956  now = jiffies;
2957  if (time_before(now, deadline)) {
2958  unsigned long delta = deadline - now;
2959 
2960  ata_link_warn(failed_link,
2961  "reset failed (errno=%d), retrying in %u secs\n",
2962  rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2963 
2964  ata_eh_release(ap);
2965  while (delta)
2966  delta = schedule_timeout_uninterruptible(delta);
2967  ata_eh_acquire(ap);
2968  }
2969 
2970  /*
2971  * While disks spin up behind a PMP, some controllers fail to send SRST.
2972  * They need to be reset - as well as the PMP - before retrying.
2973  */
2974  if (rc == -ERESTART) {
2975  if (ata_is_host_link(link))
2976  ata_eh_thaw_port(ap);
2977  goto out;
2978  }
2979 
2980  if (try == max_tries - 1) {
2981  sata_down_spd_limit(link, 0);
2982  if (slave)
2983  sata_down_spd_limit(slave, 0);
2984  } else if (rc == -EPIPE)
2985  sata_down_spd_limit(failed_link, 0);
2986 
2987  if (hardreset)
2988  reset = hardreset;
2989  goto retry;
2990 }
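A minimal sketch of how this function is normally reached: a low-level driver does not call ata_eh_reset() directly but hands its reset methods to the EH core, which passes them in during recovery (foo_softreset is hypothetical; the other helpers are stock libata ones):

    static void foo_error_handler(struct ata_port *ap)
    {
            /* ata_do_eh() runs autopsy, reporting and recovery;
             * recovery calls ata_eh_reset() with these methods.
             */
            ata_do_eh(ap, ata_std_prereset, foo_softreset,
                      sata_std_hardreset, ata_std_postreset);
    }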
2991 
2992 static inline void ata_eh_pull_park_action(struct ata_port *ap)
2993 {
2994  struct ata_link *link;
2995  struct ata_device *dev;
2996  unsigned long flags;
2997 
2998  /*
2999  * This function can be thought of as an extended version of
3000  * ata_eh_about_to_do() specially crafted to accommodate the
3001  * requirements of ATA_EH_PARK handling. Since the EH thread
3002  * does not leave the do {} while () loop in ata_eh_recover as
3003  * long as the timeout for a park request to *one* device on
3004  * the port has not expired, and since we still want to pick
3005  * up park requests to other devices on the same port or
3006  * timeout updates for the same device, we have to pull
3007  * ATA_EH_PARK actions from eh_info into eh_context.i
3008  * ourselves at the beginning of each pass over the loop.
3009  *
3010  * Additionally, all write accesses to &ap->park_req_pending
3011  * through INIT_COMPLETION() (see below) or complete_all()
3012  * (see ata_scsi_park_store()) are protected by the host lock.
3013  * As a result we have that park_req_pending.done is zero on
3014  * exit from this function, i.e. when ATA_EH_PARK actions for
3015  * *all* devices on port ap have been pulled into the
3016  * respective eh_context structs. If, and only if,
3017  * park_req_pending.done is non-zero by the time we reach
3018  * wait_for_completion_timeout(), another ATA_EH_PARK action
3019  * has been scheduled for at least one of the devices on port
3020  * ap and we have to cycle over the do {} while () loop in
3021  * ata_eh_recover() again.
3022  */
3023 
3024  spin_lock_irqsave(ap->lock, flags);
3025  INIT_COMPLETION(ap->park_req_pending);
3026  ata_for_each_link(link, ap, EDGE) {
3027  ata_for_each_dev(dev, link, ALL) {
3028  struct ata_eh_info *ehi = &link->eh_info;
3029 
3030  link->eh_context.i.dev_action[dev->devno] |=
3031  ehi->dev_action[dev->devno] & ATA_EH_PARK;
3032  ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
3033  }
3034  }
3035  spin_unlock_irqrestore(ap->lock, flags);
3036 }
3037 
3038 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
3039 {
3040  struct ata_eh_context *ehc = &dev->link->eh_context;
3041  struct ata_taskfile tf;
3042  unsigned int err_mask;
3043 
3044  ata_tf_init(dev, &tf);
3045  if (park) {
3046  ehc->unloaded_mask |= 1 << dev->devno;
3047  tf.command = ATA_CMD_IDLEIMMEDIATE;
3048  tf.feature = 0x44;
3049  tf.lbal = 0x4c;
3050  tf.lbam = 0x4e;
3051  tf.lbah = 0x55;
3052  } else {
3053  ehc->unloaded_mask &= ~(1 << dev->devno);
3054  tf.command = ATA_CMD_CHK_POWER;
3055  }
3056 
3057  tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
3058  tf.protocol |= ATA_PROT_NODATA;
3059  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3060  if (park && (err_mask || tf.lbal != 0xc4)) {
3061  ata_dev_err(dev, "head unload failed!\n");
3062  ehc->unloaded_mask &= ~(1 << dev->devno);
3063  }
3064 }
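The magic numbers above implement the ATA-8 head-unload handshake: IDLE IMMEDIATE with FEATURE 0x44 (unload) and the 0x4c/0x4e/0x55 signature in the LBA registers asks the drive to park its heads, and the drive acknowledges by returning 0xc4 in LBAL, which is what the failure check reads back. The unpark path issues CHECK POWER MODE instead; any innocuous command will do, as command arrival itself causes the drive to reload the heads.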
3065 
3066 static int ata_eh_revalidate_and_attach(struct ata_link *link,
3067  struct ata_device **r_failed_dev)
3068 {
3069  struct ata_port *ap = link->ap;
3070  struct ata_eh_context *ehc = &link->eh_context;
3071  struct ata_device *dev;
3072  unsigned int new_mask = 0;
3073  unsigned long flags;
3074  int rc = 0;
3075 
3076  DPRINTK("ENTER\n");
3077 
3078  /* For PATA drive side cable detection to work, IDENTIFY must
3079  * be done backwards such that PDIAG- is released by the slave
3080  * device before the master device is identified.
3081  */
3082  ata_for_each_dev(dev, link, ALL_REVERSE) {
3083  unsigned int action = ata_eh_dev_action(dev);
3084  unsigned int readid_flags = 0;
3085 
3086  if (ehc->i.flags & ATA_EHI_DID_RESET)
3087  readid_flags |= ATA_READID_POSTRESET;
3088 
3089  if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3090  WARN_ON(dev->class == ATA_DEV_PMP);
3091 
3092  if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3093  rc = -EIO;
3094  goto err;
3095  }
3096 
3097  ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3098  rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3099  readid_flags);
3100  if (rc)
3101  goto err;
3102 
3103  ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3104 
3105  /* Configuration may have changed, reconfigure
3106  * transfer mode.
3107  */
3108  ehc->i.flags |= ATA_EHI_SETMODE;
3109 
3110  /* schedule the scsi_rescan_device() here */
3111  schedule_work(&(ap->scsi_rescan_task));
3112  } else if (dev->class == ATA_DEV_UNKNOWN &&
3113  ehc->tries[dev->devno] &&
3114  ata_class_enabled(ehc->classes[dev->devno])) {
3115  /* Temporarily set dev->class; it will be
3116  * permanently set once all configurations are
3117  * complete. This is necessary because new
3118  * device configuration is done in two
3119  * separate loops.
3120  */
3121  dev->class = ehc->classes[dev->devno];
3122 
3123  if (dev->class == ATA_DEV_PMP)
3124  rc = sata_pmp_attach(dev);
3125  else
3126  rc = ata_dev_read_id(dev, &dev->class,
3127  readid_flags, dev->id);
3128 
3129  /* read_id might have changed class, store and reset */
3130  ehc->classes[dev->devno] = dev->class;
3131  dev->class = ATA_DEV_UNKNOWN;
3132 
3133  switch (rc) {
3134  case 0:
3135  /* clear error info accumulated during probe */
3136  ata_ering_clear(&dev->ering);
3137  new_mask |= 1 << dev->devno;
3138  break;
3139  case -ENOENT:
3140  /* IDENTIFY was issued to non-existent
3141  * device. No need to reset. Just
3142  * thaw and ignore the device.
3143  */
3144  ata_eh_thaw_port(ap);
3145  break;
3146  default:
3147  goto err;
3148  }
3149  }
3150  }
3151 
3152  /* PDIAG- should have been released, ask cable type if post-reset */
3153  if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
3154  if (ap->ops->cable_detect)
3155  ap->cbl = ap->ops->cable_detect(ap);
3156  ata_force_cbl(ap);
3157  }
3158 
3159  /* Configure new devices forward such that user doesn't see
3160  * device detection messages backwards.
3161  */
3162  ata_for_each_dev(dev, link, ALL) {
3163  if (!(new_mask & (1 << dev->devno)))
3164  continue;
3165 
3166  dev->class = ehc->classes[dev->devno];
3167 
3168  if (dev->class == ATA_DEV_PMP)
3169  continue;
3170 
3171  ehc->i.flags |= ATA_EHI_PRINTINFO;
3172  rc = ata_dev_configure(dev);
3173  ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3174  if (rc) {
3175  dev->class = ATA_DEV_UNKNOWN;
3176  goto err;
3177  }
3178 
3179  spin_lock_irqsave(ap->lock, flags);
3180  ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3181  spin_unlock_irqrestore(ap->lock, flags);
3182 
3183  /* new device discovered, configure xfermode */
3184  ehc->i.flags |= ATA_EHI_SETMODE;
3185  }
3186 
3187  return 0;
3188 
3189  err:
3190  *r_failed_dev = dev;
3191  DPRINTK("EXIT rc=%d\n", rc);
3192  return rc;
3193 }
3194 
3195 /**
3196  * ata_set_mode - Program timings and issue SET FEATURES - XFER
3197  * @link: link on which timings will be programmed
3198  * @r_failed_dev: out parameter for failed device
3199  *
3200  * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3201  * ata_set_mode() fails, pointer to the failing device is
3202  * returned in @r_failed_dev.
3203  *
3204  * LOCKING:
3205  * PCI/etc. bus probe sem.
3206  *
3207  * RETURNS:
3208  * 0 on success, negative errno otherwise
3209  */
3210 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3211 {
3212  struct ata_port *ap = link->ap;
3213  struct ata_device *dev;
3214  int rc;
3215 
3216  /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3217  ata_for_each_dev(dev, link, ENABLED) {
3218  if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3219  struct ata_ering_entry *ent;
3220 
3221  ent = ata_ering_top(&dev->ering);
3222  if (ent)
3223  ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3224  }
3225  }
3226 
3227  /* has private set_mode? */
3228  if (ap->ops->set_mode)
3229  rc = ap->ops->set_mode(link, r_failed_dev);
3230  else
3231  rc = ata_do_set_mode(link, r_failed_dev);
3232 
3233  /* if transfer mode has changed, set DUBIOUS_XFER on device */
3234  ata_for_each_dev(dev, link, ENABLED) {
3235  struct ata_eh_context *ehc = &link->eh_context;
3236  u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3237  u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3238 
3239  if (dev->xfer_mode != saved_xfer_mode ||
3240  ata_ncq_enabled(dev) != saved_ncq)
3241  dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3242  }
3243 
3244  return rc;
3245 }
3246 
3247 /**
3248  * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3249  * @dev: ATAPI device to clear UA for
3250  *
3251  * Resets and other operations can make an ATAPI device raise
3252  * UNIT ATTENTION which causes the next operation to fail. This
3253  * function clears UA.
3254  *
3255  * LOCKING:
3256  * EH context (may sleep).
3257  *
3258  * RETURNS:
3259  * 0 on success, -errno on failure.
3260  */
3261 static int atapi_eh_clear_ua(struct ata_device *dev)
3262 {
3263  int i;
3264 
3265  for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3266  u8 *sense_buffer = dev->link->ap->sector_buf;
3267  u8 sense_key = 0;
3268  unsigned int err_mask;
3269 
3270  err_mask = atapi_eh_tur(dev, &sense_key);
3271  if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3272  ata_dev_warn(dev,
3273  "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3274  err_mask);
3275  return -EIO;
3276  }
3277 
3278  if (!err_mask || sense_key != UNIT_ATTENTION)
3279  return 0;
3280 
3281  err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3282  if (err_mask) {
3283  ata_dev_warn(dev, "failed to clear "
3284  "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3285  return -EIO;
3286  }
3287  }
3288 
3289  ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3290  ATA_EH_UA_TRIES);
3291 
3292  return 0;
3293 }
3294 
3295 /**
3296  * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3297  * @dev: ATA device which may need FLUSH retry
3298  *
3299  * If @dev failed FLUSH, it needs to be reported upper layer
3300  * immediately as it means that @dev failed to remap and already
3301  * lost at least a sector and further FLUSH retrials won't make
3302  * any difference to the lost sector. However, if FLUSH failed
3303  * for other reasons, for example transmission error, FLUSH
3304  * needs to be retried.
3305  *
3306  * This function determines whether FLUSH failure retry is
3307  * necessary and performs it if so.
3308  *
3309  * RETURNS:
3310  * 0 if EH can continue, -errno if EH needs to be repeated.
3311  */
3312 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3313 {
3314  struct ata_link *link = dev->link;
3315  struct ata_port *ap = link->ap;
3316  struct ata_queued_cmd *qc;
3317  struct ata_taskfile tf;
3318  unsigned int err_mask;
3319  int rc = 0;
3320 
3321  /* did flush fail for this device? */
3322  if (!ata_tag_valid(link->active_tag))
3323  return 0;
3324 
3325  qc = __ata_qc_from_tag(ap, link->active_tag);
3326  if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3327  qc->tf.command != ATA_CMD_FLUSH))
3328  return 0;
3329 
3330  /* if the device failed it, it should be reported to upper layers */
3331  if (qc->err_mask & AC_ERR_DEV)
3332  return 0;
3333 
3334  /* flush failed for some other reason, give it another shot */
3335  ata_tf_init(dev, &tf);
3336 
3337  tf.command = qc->tf.command;
3338  tf.flags |= ATA_TFLAG_DEVICE;
3339  tf.protocol = ATA_PROT_NODATA;
3340 
3341  ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
3342  tf.command, qc->err_mask);
3343 
3344  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3345  if (!err_mask) {
3346  /*
3347  * FLUSH is complete but there's no way to
3348  * successfully complete a failed command from EH.
3349  * Making sure retry is allowed at least once and
3350  * retrying it should do the trick - whatever was in
3351  * the cache is already on the platter and this won't
3352  * cause infinite loop.
3353  */
3354  qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3355  } else {
3356  ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
3357  err_mask);
3358  rc = -EIO;
3359 
3360  /* if device failed it, report it to upper layers */
3361  if (err_mask & AC_ERR_DEV) {
3362  qc->err_mask |= AC_ERR_DEV;
3363  qc->result_tf = tf;
3364  if (!(ap->pflags & ATA_PFLAG_FROZEN))
3365  rc = 0;
3366  }
3367  }
3368  return rc;
3369 }
3370 
3371 /**
3372  * ata_eh_set_lpm - configure SATA interface power management
3373  * @link: link to configure power management
3374  * @policy: the link power management policy
3375  * @r_failed_dev: out parameter for failed device
3376  *
3377  * Enable SATA Interface power management. This will enable
3378  * Device Interface Power Management (DIPM) for min_power
3379  * policy, and then call driver specific callbacks for
3380  * enabling Host Initiated Power management.
3381  *
3382  * LOCKING:
3383  * EH context.
3384  *
3385  * RETURNS:
3386  * 0 on success, -errno on failure.
3387  */
3388 static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3389  struct ata_device **r_failed_dev)
3390 {
3391  struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3392  struct ata_eh_context *ehc = &link->eh_context;
3393  struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3394  enum ata_lpm_policy old_policy = link->lpm_policy;
3395  bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
3396  unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3397  unsigned int err_mask;
3398  int rc;
3399 
3400  /* if the link or host doesn't do LPM, noop */
3401  if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3402  return 0;
3403 
3404  /*
3405  * DIPM is enabled only for MIN_POWER as some devices
3406  * misbehave when the host NACKs transition to SLUMBER. Order
3407  * device and link configurations such that the host always
3408  * allows DIPM requests.
3409  */
3410  ata_for_each_dev(dev, link, ENABLED) {
3411  bool hipm = ata_id_has_hipm(dev->id);
3412  bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
3413 
3414  /* find the first enabled and LPM enabled devices */
3415  if (!link_dev)
3416  link_dev = dev;
3417 
3418  if (!lpm_dev && (hipm || dipm))
3419  lpm_dev = dev;
3420 
3421  hints &= ~ATA_LPM_EMPTY;
3422  if (!hipm)
3423  hints &= ~ATA_LPM_HIPM;
3424 
3425  /* disable DIPM before changing link config */
3426  if (policy != ATA_LPM_MIN_POWER && dipm) {
3427  err_mask = ata_dev_set_feature(dev,
3428  SETFEATURES_SATA_DISABLE, SATA_DIPM);
3429  if (err_mask && err_mask != AC_ERR_DEV) {
3430  ata_dev_warn(dev,
3431  "failed to disable DIPM, Emask 0x%x\n",
3432  err_mask);
3433  rc = -EIO;
3434  goto fail;
3435  }
3436  }
3437  }
3438 
3439  if (ap) {
3440  rc = ap->ops->set_lpm(link, policy, hints);
3441  if (!rc && ap->slave_link)
3442  rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3443  } else
3444  rc = sata_pmp_set_lpm(link, policy, hints);
3445 
3446  /*
3447  * Attribute link config failure to the first (LPM) enabled
3448  * device on the link.
3449  */
3450  if (rc) {
3451  if (rc == -EOPNOTSUPP) {
3452  link->flags |= ATA_LFLAG_NO_LPM;
3453  return 0;
3454  }
3455  dev = lpm_dev ? lpm_dev : link_dev;
3456  goto fail;
3457  }
3458 
3459  /*
3460  * Low level driver acked the transition. Issue DIPM command
3461  * with the new policy set.
3462  */
3463  link->lpm_policy = policy;
3464  if (ap && ap->slave_link)
3465  ap->slave_link->lpm_policy = policy;
3466 
3467  /* host config updated, enable DIPM if transitioning to MIN_POWER */
3468  ata_for_each_dev(dev, link, ENABLED) {
3469  if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
3470  ata_id_has_dipm(dev->id)) {
3471  err_mask = ata_dev_set_feature(dev,
3472  SETFEATURES_SATA_ENABLE, SATA_DIPM);
3473  if (err_mask && err_mask != AC_ERR_DEV) {
3474  ata_dev_warn(dev,
3475  "failed to enable DIPM, Emask 0x%x\n",
3476  err_mask);
3477  rc = -EIO;
3478  goto fail;
3479  }
3480  }
3481  }
3482 
3483  return 0;
3484 
3485 fail:
3486  /* restore the old policy */
3487  link->lpm_policy = old_policy;
3488  if (ap && ap->slave_link)
3489  ap->slave_link->lpm_policy = old_policy;
3490 
3491  /* if no device or only one more chance is left, disable LPM */
3492  if (!dev || ehc->tries[dev->devno] <= 2) {
3493  ata_link_warn(link, "disabling LPM on the link\n");
3494  link->flags |= ATA_LFLAG_NO_LPM;
3495  }
3496  if (r_failed_dev)
3497  *r_failed_dev = dev;
3498  return rc;
3499 }
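The @policy value applied here ultimately originates in user space: writing min_power, medium_power or max_performance to the SCSI host's link_power_management_policy sysfs attribute updates ap->target_lpm_policy, and ata_eh_recover() (below) invokes this function whenever a link's current policy differs from that target.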
3500 
3501 static int ata_link_nr_enabled(struct ata_link *link)
3502 {
3503  struct ata_device *dev;
3504  int cnt = 0;
3505 
3506  ata_for_each_dev(dev, link, ENABLED)
3507  cnt++;
3508  return cnt;
3509 }
3510 
3511 static int ata_link_nr_vacant(struct ata_link *link)
3512 {
3513  struct ata_device *dev;
3514  int cnt = 0;
3515 
3516  ata_for_each_dev(dev, link, ALL)
3517  if (dev->class == ATA_DEV_UNKNOWN)
3518  cnt++;
3519  return cnt;
3520 }
3521 
3522 static int ata_eh_skip_recovery(struct ata_link *link)
3523 {
3524  struct ata_port *ap = link->ap;
3525  struct ata_eh_context *ehc = &link->eh_context;
3526  struct ata_device *dev;
3527 
3528  /* skip disabled links */
3529  if (link->flags & ATA_LFLAG_DISABLED)
3530  return 1;
3531 
3532  /* skip if explicitly requested */
3533  if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3534  return 1;
3535 
3536  /* thaw frozen port and recover failed devices */
3537  if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3538  return 0;
3539 
3540  /* reset at least once if reset is requested */
3541  if ((ehc->i.action & ATA_EH_RESET) &&
3542  !(ehc->i.flags & ATA_EHI_DID_RESET))
3543  return 0;
3544 
3545  /* skip if class codes for all vacant slots are ATA_DEV_NONE */
3546  ata_for_each_dev(dev, link, ALL) {
3547  if (dev->class == ATA_DEV_UNKNOWN &&
3548  ehc->classes[dev->devno] != ATA_DEV_NONE)
3549  return 0;
3550  }
3551 
3552  return 1;
3553 }
3554 
3555 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3556 {
3557  u64 interval = ATA_EH_PROBE_TRIAL_INTERVAL;
3558  u64 now = get_jiffies_64();
3559  int *trials = void_arg;
3560 
3561  if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3562  (ent->timestamp < now - min(now, interval)))
3563  return -1;
3564 
3565  (*trials)++;
3566  return 0;
3567 }
3568 
3569 static int ata_eh_schedule_probe(struct ata_device *dev)
3570 {
3571  struct ata_eh_context *ehc = &dev->link->eh_context;
3572  struct ata_link *link = ata_dev_phys_link(dev);
3573  int trials = 0;
3574 
3575  if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3576  (ehc->did_probe_mask & (1 << dev->devno)))
3577  return 0;
3578 
3579  ata_eh_detach_dev(dev);
3580  ata_dev_init(dev);
3581  ehc->did_probe_mask |= (1 << dev->devno);
3582  ehc->i.action |= ATA_EH_RESET;
3583  ehc->saved_xfer_mode[dev->devno] = 0;
3584  ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3585 
3586  /* the link may be in a deep sleep, wake it up */
3587  if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3588  if (ata_is_host_link(link))
3589  link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3590  ATA_LPM_EMPTY);
3591  else
3592  sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3593  ATA_LPM_EMPTY);
3594  }
3595 
3596  /* Record and count probe trials on the ering. The specific
3597  * error mask used is irrelevant. Because a successful device
3598  * detection clears the ering, this count accumulates only if
3599  * there are consecutive failed probes.
3600  *
3601  * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3602  * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3603  * forced to 1.5Gbps.
3604  *
3605  * This is to work around cases where failed link speed
3606  * negotiation results in device misdetection leading to
3607  * infinite DEVXCHG or PHRDY CHG events.
3608  */
3609  ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3610  ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3611 
3612  if (trials > ATA_EH_PROBE_TRIALS)
3613  sata_down_spd_limit(link, 1);
3614 
3615  return 1;
3616 }
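A worked example of the trial accounting, using ATA_EH_PROBE_TRIAL_INTERVAL (one minute, from the enum at the top of this file) and ATA_EH_PROBE_TRIALS (2 in this kernel): each probe attempt records one dummy ering entry, and ata_count_probe_trials_cb() counts only entries younger than the interval, so a third failed probe within a minute makes trials exceed 2 and sata_down_spd_limit() pins the link at 1.5Gbps, breaking otherwise endless DEVXCHG/PHRDY CHG loops.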
3617 
3618 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3619 {
3620  struct ata_eh_context *ehc = &dev->link->eh_context;
3621 
3622  /* -EAGAIN from EH routine indicates retry without prejudice.
3623  * The requester is responsible for ensuring forward progress.
3624  */
3625  if (err != -EAGAIN)
3626  ehc->tries[dev->devno]--;
3627 
3628  switch (err) {
3629  case -ENODEV:
3630  /* device missing or wrong IDENTIFY data, schedule probing */
3631  ehc->i.probe_mask |= (1 << dev->devno);
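  /* fall through */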
3632  case -EINVAL:
3633  /* give it just one more chance */
3634  ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
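  /* fall through */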
3635  case -EIO:
3636  if (ehc->tries[dev->devno] == 1) {
3637  /* This is the last chance, better to slow
3638  * down than lose it.
3639  */
3640  sata_down_spd_limit(ata_dev_phys_link(dev), 1);
3641  if (dev->pio_mode > XFER_PIO_0)
3642  ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3643  }
3644  }
3645 
3646  if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3647  /* disable device if it has used up all its chances */
3648  ata_dev_disable(dev);
3649 
3650  /* detach if offline */
3651  if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3652  ata_eh_detach_dev(dev);
3653 
3654  /* schedule probe if necessary */
3655  if (ata_eh_schedule_probe(dev)) {
3656  ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3657  memset(ehc->cmd_timeout_idx[dev->devno], 0,
3658  sizeof(ehc->cmd_timeout_idx[dev->devno]));
3659  }
3660 
3661  return 1;
3662  } else {
3663  ehc->i.action |= ATA_EH_RESET;
3664  return 0;
3665  }
3666 }
3667 
3690 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3691  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3692  ata_postreset_fn_t postreset,
3693  struct ata_link **r_failed_link)
3694 {
3695  struct ata_link *link;
3696  struct ata_device *dev;
3697  int rc, nr_fails;
3698  unsigned long flags, deadline;
3699 
3700  DPRINTK("ENTER\n");
3701 
3702  /* prep for recovery */
3703  ata_for_each_link(link, ap, EDGE) {
3704  struct ata_eh_context *ehc = &link->eh_context;
3705 
3706  /* re-enable link? */
3707  if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3708  ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3709  spin_lock_irqsave(ap->lock, flags);
3710  link->flags &= ~ATA_LFLAG_DISABLED;
3711  spin_unlock_irqrestore(ap->lock, flags);
3712  ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3713  }
3714 
3715  ata_for_each_dev(dev, link, ALL) {
3716  if (link->flags & ATA_LFLAG_NO_RETRY)
3717  ehc->tries[dev->devno] = 1;
3718  else
3719  ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3720 
3721  /* collect port action mask recorded in dev actions */
3722  ehc->i.action |= ehc->i.dev_action[dev->devno] &
3723  ~ATA_EH_PERDEV_MASK;
3724  ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3725 
3726  /* process hotplug request */
3727  if (dev->flags & ATA_DFLAG_DETACH)
3728  ata_eh_detach_dev(dev);
3729 
3730  /* schedule probe if necessary */
3731  if (!ata_dev_enabled(dev))
3732  ata_eh_schedule_probe(dev);
3733  }
3734  }
3735 
3736  retry:
3737  rc = 0;
3738 
3739  /* if UNLOADING, finish immediately */
3740  if (ap->pflags & ATA_PFLAG_UNLOADING)
3741  goto out;
3742 
3743  /* prep for EH */
3744  ata_for_each_link(link, ap, EDGE) {
3745  struct ata_eh_context *ehc = &link->eh_context;
3746 
3747  /* skip EH if possible. */
3748  if (ata_eh_skip_recovery(link))
3749  ehc->i.action = 0;
3750 
3751  ata_for_each_dev(dev, link, ALL)
3752  ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3753  }
3754 
3755  /* reset */
3756  ata_for_each_link(link, ap, EDGE) {
3757  struct ata_eh_context *ehc = &link->eh_context;
3758 
3759  if (!(ehc->i.action & ATA_EH_RESET))
3760  continue;
3761 
3762  rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3763  prereset, softreset, hardreset, postreset);
3764  if (rc) {
3765  ata_link_err(link, "reset failed, giving up\n");
3766  goto out;
3767  }
3768  }
3769 
3770  do {
3771  unsigned long now;
3772 
3773  /*
3774  * clears ATA_EH_PARK in eh_info and resets
3775  * ap->park_req_pending
3776  */
3777  ata_eh_pull_park_action(ap);
3778 
3779  deadline = jiffies;
3780  ata_for_each_link(link, ap, EDGE) {
3781  ata_for_each_dev(dev, link, ALL) {
3782  struct ata_eh_context *ehc = &link->eh_context;
3783  unsigned long tmp;
3784 
3785  if (dev->class != ATA_DEV_ATA)
3786  continue;
3787  if (!(ehc->i.dev_action[dev->devno] &
3788  ATA_EH_PARK))
3789  continue;
3790  tmp = dev->unpark_deadline;
3791  if (time_before(deadline, tmp))
3792  deadline = tmp;
3793  else if (time_before_eq(tmp, jiffies))
3794  continue;
3795  if (ehc->unloaded_mask & (1 << dev->devno))
3796  continue;
3797 
3798  ata_eh_park_issue_cmd(dev, 1);
3799  }
3800  }
3801 
3802  now = jiffies;
3803  if (time_before_eq(deadline, now))
3804  break;
3805 
3806  ata_eh_release(ap);
3807  deadline = wait_for_completion_timeout(&ap->park_req_pending,
3808  deadline - now);
3809  ata_eh_acquire(ap);
3810  } while (deadline);
3811  ata_for_each_link(link, ap, EDGE) {
3812  ata_for_each_dev(dev, link, ALL) {
3813  if (!(link->eh_context.unloaded_mask &
3814  (1 << dev->devno)))
3815  continue;
3816 
3817  ata_eh_park_issue_cmd(dev, 0);
3818  ata_eh_done(link, dev, ATA_EH_PARK);
3819  }
3820  }
3821 
3822  /* the rest */
3823  nr_fails = 0;
3824  ata_for_each_link(link, ap, PMP_FIRST) {
3825  struct ata_eh_context *ehc = &link->eh_context;
3826 
3827  if (sata_pmp_attached(ap) && ata_is_host_link(link))
3828  goto config_lpm;
3829 
3830  /* revalidate existing devices and attach new ones */
3831  rc = ata_eh_revalidate_and_attach(link, &dev);
3832  if (rc)
3833  goto rest_fail;
3834 
3835  /* if PMP got attached, return, pmp EH will take care of it */
3836  if (link->device->class == ATA_DEV_PMP) {
3837  ehc->i.action = 0;
3838  return 0;
3839  }
3840 
3841  /* configure transfer mode if necessary */
3842  if (ehc->i.flags & ATA_EHI_SETMODE) {
3843  rc = ata_set_mode(link, &dev);
3844  if (rc)
3845  goto rest_fail;
3846  ehc->i.flags &= ~ATA_EHI_SETMODE;
3847  }
3848 
3849  /* If reset has been issued, clear UA to avoid
3850  * disrupting the current users of the device.
3851  */
3852  if (ehc->i.flags & ATA_EHI_DID_RESET) {
3853  ata_for_each_dev(dev, link, ALL) {
3854  if (dev->class != ATA_DEV_ATAPI)
3855  continue;
3856  rc = atapi_eh_clear_ua(dev);
3857  if (rc)
3858  goto rest_fail;
3859  }
3860  }
3861 
3862  /* retry flush if necessary */
3863  ata_for_each_dev(dev, link, ALL) {
3864  if (dev->class != ATA_DEV_ATA)
3865  continue;
3866  rc = ata_eh_maybe_retry_flush(dev);
3867  if (rc)
3868  goto rest_fail;
3869  }
3870 
3871  config_lpm:
3872  /* configure link power saving */
3873  if (link->lpm_policy != ap->target_lpm_policy) {
3874  rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
3875  if (rc)
3876  goto rest_fail;
3877  }
3878 
3879  /* this link is okay now */
3880  ehc->i.flags = 0;
3881  continue;
3882 
3883  rest_fail:
3884  nr_fails++;
3885  if (dev)
3886  ata_eh_handle_dev_fail(dev, rc);
3887 
3888  if (ap->pflags & ATA_PFLAG_FROZEN) {
3889  /* PMP reset requires working host port.
3890  * Can't retry if it's frozen.
3891  */
3892  if (sata_pmp_attached(ap))
3893  goto out;
3894  break;
3895  }
3896  }
3897 
3898  if (nr_fails)
3899  goto retry;
3900 
3901  out:
3902  if (rc && r_failed_link)
3903  *r_failed_link = link;
3904 
3905  DPRINTK("EXIT, rc=%d\n", rc);
3906  return rc;
3907 }
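ata_eh_recover() has two callers: ata_do_eh() below for plain ports, and sata_pmp_error_handler() in libata-pmp.c, which runs it first for the host link and then drives recovery of the PMP fan-out links.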
3908 
3909 /**
3910  * ata_eh_finish - finish up EH
3911  * @ap: host port to finish EH for
3912  *
3913  * Recovery is complete. Clean up EH states and retry or finish
3914  * failed qcs.
3915  *
3916  * LOCKING:
3917  * None.
3918  */
3919 void ata_eh_finish(struct ata_port *ap)
3920 {
3921  int tag;
3922 
3923  /* retry or finish qcs */
3924  for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3925  struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3926 
3927  if (!(qc->flags & ATA_QCFLAG_FAILED))
3928  continue;
3929 
3930  if (qc->err_mask) {
3931  /* FIXME: Once EH migration is complete,
3932  * generate sense data in this function,
3933  * considering both err_mask and tf.
3934  */
3935  if (qc->flags & ATA_QCFLAG_RETRY)
3936  ata_eh_qc_retry(qc);
3937  else
3938  ata_eh_qc_complete(qc);
3939  } else {
3940  if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3941  ata_eh_qc_complete(qc);
3942  } else {
3943  /* feed zero TF to sense generation */
3944  memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3945  ata_eh_qc_retry(qc);
3946  }
3947  }
3948  }
3949 
3950  /* make sure nr_active_links is zero after EH */
3951  WARN_ON(ap->nr_active_links);
3952  ap->nr_active_links = 0;
3953 }
3954 
3955 /**
3956  * ata_do_eh - do standard error handling
3957  * @ap: host port to handle error for
3958  *
3959  * @prereset: prereset method (can be NULL)
3960  * @softreset: softreset method (can be NULL)
3961  * @hardreset: hardreset method (can be NULL)
3962  * @postreset: postreset method (can be NULL)
3963  *
3964  * Perform standard error handling sequence.
3965  *
3966  * LOCKING:
3967  * Kernel thread context (may sleep).
3968  */
3969 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3970  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3971  ata_postreset_fn_t postreset)
3972 {
3973  struct ata_device *dev;
3974  int rc;
3975 
3976  ata_eh_autopsy(ap);
3977  ata_eh_report(ap);
3978 
3979  rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3980  NULL);
3981  if (rc) {
3982  ata_for_each_dev(dev, &ap->link, ALL)
3983  ata_dev_disable(dev);
3984  }
3985 
3986  ata_eh_finish(ap);
3987 }
3988 
3989 /**
3990  * ata_std_error_handler - standard error handler
3991  * @ap: host port to handle error for
3992  *
3993  * Standard error handler
3994  *
3995  * LOCKING:
3996  * Kernel thread context (may sleep).
3997  */
3998 void ata_std_error_handler(struct ata_port *ap)
3999 {
4000  struct ata_port_operations *ops = ap->ops;
4001  ata_reset_fn_t hardreset = ops->hardreset;
4002 
4003  /* ignore built-in hardreset if SCR access is not available */
4004  if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4005  hardreset = NULL;
4006 
4007  ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4008 }
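Usage sketch (foo_port_ops is hypothetical, not from this file): drivers rarely install this hook by hand, because ata_base_port_ops already carries it and most port_ops chains inherit from there:

    static struct ata_port_operations foo_port_ops = {
            /* inherits .error_handler = ata_std_error_handler */
            .inherits       = &ata_base_port_ops,
    };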
4009 
4010 #ifdef CONFIG_PM
4011 
4020 static void ata_eh_handle_port_suspend(struct ata_port *ap)
4021 {
4022  unsigned long flags;
4023  int rc = 0;
4024 
4025  /* are we suspending? */
4026  spin_lock_irqsave(ap->lock, flags);
4027  if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4028  ap->pm_mesg.event == PM_EVENT_ON) {
4029  spin_unlock_irqrestore(ap->lock, flags);
4030  return;
4031  }
4032  spin_unlock_irqrestore(ap->lock, flags);
4033 
4034  WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4035 
4036  /* tell ACPI we're suspending */
4037  rc = ata_acpi_on_suspend(ap);
4038  if (rc)
4039  goto out;
4040 
4041  /* suspend */
4042  ata_eh_freeze_port(ap);
4043 
4044  if (ap->ops->port_suspend)
4045  rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4046 
4047  ata_acpi_set_state(ap, PMSG_SUSPEND);
4048  out:
4049  /* report result */
4050  spin_lock_irqsave(ap->lock, flags);
4051 
4052  ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4053  if (rc == 0)
4054  ap->pflags |= ATA_PFLAG_SUSPENDED;
4055  else if (ap->pflags & ATA_PFLAG_FROZEN)
4056  ata_eh_thaw_port(ap);
4057 
4058  if (ap->pm_result) {
4059  *ap->pm_result = rc;
4060  ap->pm_result = NULL;
4061  }
4062 
4063  spin_unlock_irqrestore(ap->lock, flags);
4064 
4065  return;
4066 }
4067 
4077 static void ata_eh_handle_port_resume(struct ata_port *ap)
4078 {
4079  struct ata_link *link;
4080  struct ata_device *dev;
4081  unsigned long flags;
4082  int rc = 0;
4083 
4084  /* are we resuming? */
4085  spin_lock_irqsave(ap->lock, flags);
4086  if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4087  ap->pm_mesg.event != PM_EVENT_ON) {
4088  spin_unlock_irqrestore(ap->lock, flags);
4089  return;
4090  }
4091  spin_unlock_irqrestore(ap->lock, flags);
4092 
4093  WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4094 
4095  /*
4096  * Error timestamps are in jiffies, which doesn't run while
4097  * suspended, and PHY events during resume aren't too uncommon.
4098  * When the two are combined, it can lead to unnecessary speed
4099  * downs if the machine is suspended and resumed repeatedly.
4100  * Clear error history.
4101  */
4102  ata_for_each_link(link, ap, HOST_FIRST)
4103  ata_for_each_dev(dev, link, ALL)
4104  ata_ering_clear(&dev->ering);
4105 
4106  ata_acpi_set_state(ap, PMSG_ON);
4107 
4108  if (ap->ops->port_resume)
4109  rc = ap->ops->port_resume(ap);
4110 
4111  /* tell ACPI that we're resuming */
4112  ata_acpi_on_resume(ap);
4113 
4114  /* report result */
4115  spin_lock_irqsave(ap->lock, flags);
4116  ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4117  if (ap->pm_result) {
4118  *ap->pm_result = rc;
4119  ap->pm_result = NULL;
4120  }
4121  spin_unlock_irqrestore(ap->lock, flags);
4122 }
4123 #endif /* CONFIG_PM */