aerdrv_core.c
1 /*
2  * drivers/pci/pcie/aer/aerdrv_core.c
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License. See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * This file implements the core part of PCI Express AER. When a PCI Express
9  * error is delivered, an error message is collected and printed to the
10  * console, and then an error recovery procedure is executed following
11  * the PCI error recovery rules.
12  *
13  * Copyright (C) 2006 Intel Corp.
14  * Tom Long Nguyen ([email protected])
15  * Zhang Yanmin ([email protected])
16  *
17  */
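For orientation: the recovery sequence implemented below (error_detected, then mmio_enabled or slot_reset, then resume) drives the struct pci_error_handlers callbacks that an endpoint driver registers through its struct pci_driver. A minimal, illustrative sketch of such driver-side handlers is shown here; the my_* names are hypothetical and not part of this file.

#include <linux/pci.h>

/* Hypothetical endpoint-driver callbacks invoked by the AER core below. */
static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
					  enum pci_channel_state state)
{
	/* Stop I/O on the device and vote on how recovery should proceed. */
	if (state == pci_channel_io_frozen)
		return PCI_ERS_RESULT_NEED_RESET;
	return PCI_ERS_RESULT_CAN_RECOVER;
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
	/* Re-initialize the device after the link/slot has been reset. */
	return PCI_ERS_RESULT_RECOVERED;
}

static void my_resume(struct pci_dev *pdev)
{
	/* Recovery is complete; restart I/O. */
}

static const struct pci_error_handlers my_err_handler = {
	.error_detected	= my_error_detected,
	.slot_reset	= my_slot_reset,
	.resume		= my_resume,
};
/* Assigned to the driver's struct pci_driver .err_handler field. */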
18 
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/pm.h>
24 #include <linux/suspend.h>
25 #include <linux/delay.h>
26 #include <linux/slab.h>
27 #include <linux/kfifo.h>
28 #include "aerdrv.h"
29 
30 static bool forceload;
31 static bool nosourceid;
32 module_param(forceload, bool, 0);
33 module_param(nosourceid, bool, 0);
34 
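Note: both parameters default to false. When needed, they are passed as module parameters on the kernel command line, e.g. aerdriver.forceload=y or aerdriver.nosourceid=y, assuming the AER service driver is built under its usual module name, aerdriver.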
35 #define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
36  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
37 
38 int pci_enable_pcie_error_reporting(struct pci_dev *dev)
39 {
40  if (pcie_aer_get_firmware_first(dev))
41  return -EIO;
42 
43  if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
44  return -EIO;
45 
46  return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
47 }
48 EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
49 
50 int pci_disable_pcie_error_reporting(struct pci_dev *dev)
51 {
52  if (pcie_aer_get_firmware_first(dev))
53  return -EIO;
54 
55  return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
56  PCI_EXP_AER_FLAGS);
57 }
58 EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
59 
60 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
61 {
62  int pos;
63  u32 status;
64 
65  pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
66  if (!pos)
67  return -EIO;
68 
69  pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
70  if (status)
71  pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
72 
73  return 0;
74 }
75 EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
76 
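The three exported helpers above are the interface endpoint drivers use to opt in to AER reporting and to clear stale uncorrectable status. A hedged usage sketch with hypothetical my_probe()/my_remove() functions (error handling trimmed for brevity):

#include <linux/pci.h>
#include <linux/aer.h>

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;

	/* Returns -EIO if AER is unavailable or firmware-first owns it. */
	pci_enable_pcie_error_reporting(pdev);
	return 0;
}

static void my_remove(struct pci_dev *pdev)
{
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

pci_cleanup_aer_uncorrect_error_status() is typically called from a driver's slot_reset handler once the device is usable again.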
82 static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
83 {
84  if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
85  e_info->dev[e_info->error_dev_num] = dev;
86  e_info->error_dev_num++;
87  return 0;
88  }
89  return -ENOSPC;
90 }
91 
92 #define PCI_BUS(x) (((x) >> 8) & 0xff)
93 
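The PCI_BUS() helper and the e_info->id comparisons below assume the requester ID is packed as (bus << 8) | devfn. A small illustration, using macros already available in this file (the device address is made up):

static void requester_id_example(void)
{
	u16 id = (0x03 << 8) | PCI_DEVFN(0, 1);	/* device 03:00.1 -> 0x0301 */
	u8 bus = PCI_BUS(id);			/* 0x03 */
	u8 devfn = id & 0xff;			/* PCI_SLOT() == 0, PCI_FUNC() == 1 */

	pr_debug("bus %#x devfn %#x\n", bus, devfn);
}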
99 static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
100 {
101  int pos;
102  u32 status, mask;
103  u16 reg16;
104 
105  /*
106  * When bus id is equal to 0, it might be a bad id
107  * reported by root port.
108  */
109  if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
110  /* Device ID match? */
111  if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
112  return true;
113 
114  /* Continue the ID comparison only if multiple errors were reported */
115  if (!e_info->multi_error_valid)
116  return false;
117  }
118 
119  /*
120  * When either
121  * 1) nosourceid==y;
122  * 2) the bus ID is 0 (some ports might lose the bus ID of the
123  * error source); or
124  * 3) there are multiple errors and the prior ID comparison fails;
125  * we check the AER status registers to find a possible reporter.
126  */
127  if (atomic_read(&dev->enable_cnt) == 0)
128  return false;
129 
130  /* Check if AER is enabled */
131  pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
132  if (!(reg16 & PCI_EXP_AER_FLAGS))
133  return false;
134 
135  pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
136  if (!pos)
137  return false;
138 
139  /* Check if error is recorded */
140  if (e_info->severity == AER_CORRECTABLE) {
141  pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
142  pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
143  } else {
144  pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
145  pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
146  }
147  if (status & ~mask)
148  return true;
149 
150  return false;
151 }
152 
153 static int find_device_iter(struct pci_dev *dev, void *data)
154 {
155  struct aer_err_info *e_info = (struct aer_err_info *)data;
156 
157  if (is_error_source(dev, e_info)) {
158  /* List this device */
159  if (add_error_device(e_info, dev)) {
160  /* We cannot handle more... Stop iteration */
161  /* TODO: Should print error message here? */
162  return 1;
163  }
164 
165  /* If there is only a single error, stop iteration */
166  if (!e_info->multi_error_valid)
167  return 1;
168  }
169  return 0;
170 }
171 
184 static bool find_source_device(struct pci_dev *parent,
185  struct aer_err_info *e_info)
186 {
187  struct pci_dev *dev = parent;
188  int result;
189 
190  /* Must reset in this function */
191  e_info->error_dev_num = 0;
192 
193  /* Is Root Port an agent that sends error message? */
194  result = find_device_iter(dev, e_info);
195  if (result)
196  return true;
197 
198  pci_walk_bus(parent->subordinate, find_device_iter, e_info);
199 
200  if (!e_info->error_dev_num) {
201  dev_printk(KERN_DEBUG, &parent->dev,
202  "can't find device of ID%04x\n",
203  e_info->id);
204  return false;
205  }
206  return true;
207 }
208 
209 static int report_error_detected(struct pci_dev *dev, void *data)
210 {
211  pci_ers_result_t vote;
212  const struct pci_error_handlers *err_handler;
213  struct aer_broadcast_data *result_data;
214  result_data = (struct aer_broadcast_data *) data;
215 
216  device_lock(&dev->dev);
217  dev->error_state = result_data->state;
218 
219  if (!dev->driver ||
220  !dev->driver->err_handler ||
221  !dev->driver->err_handler->error_detected) {
222  if (result_data->state == pci_channel_io_frozen &&
223  !(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) {
224  /*
225  * In case of fatal recovery, if one of the
226  * downstream devices has no driver, we might
227  * be unable to recover because a later insmod
228  * of a driver for this device would be unaware
229  * of its hw state.
230  */
231  dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
232  dev->driver ?
233  "no AER-aware driver" : "no driver");
234  }
235  goto out;
236  }
237 
238  err_handler = dev->driver->err_handler;
239  vote = err_handler->error_detected(dev, result_data->state);
240  result_data->result = merge_result(result_data->result, vote);
241 out:
242  device_unlock(&dev->dev);
243  return 0;
244 }
245 
246 static int report_mmio_enabled(struct pci_dev *dev, void *data)
247 {
248  pci_ers_result_t vote;
249  const struct pci_error_handlers *err_handler;
250  struct aer_broadcast_data *result_data;
251  result_data = (struct aer_broadcast_data *) data;
252 
253  device_lock(&dev->dev);
254  if (!dev->driver ||
255  !dev->driver->err_handler ||
256  !dev->driver->err_handler->mmio_enabled)
257  goto out;
258 
259  err_handler = dev->driver->err_handler;
260  vote = err_handler->mmio_enabled(dev);
261  result_data->result = merge_result(result_data->result, vote);
262 out:
263  device_unlock(&dev->dev);
264  return 0;
265 }
266 
267 static int report_slot_reset(struct pci_dev *dev, void *data)
268 {
269  pci_ers_result_t vote;
270  const struct pci_error_handlers *err_handler;
271  struct aer_broadcast_data *result_data;
272  result_data = (struct aer_broadcast_data *) data;
273 
274  device_lock(&dev->dev);
275  if (!dev->driver ||
276  !dev->driver->err_handler ||
277  !dev->driver->err_handler->slot_reset)
278  goto out;
279 
280  err_handler = dev->driver->err_handler;
281  vote = err_handler->slot_reset(dev);
282  result_data->result = merge_result(result_data->result, vote);
283 out:
284  device_unlock(&dev->dev);
285  return 0;
286 }
287 
288 static int report_resume(struct pci_dev *dev, void *data)
289 {
290  const struct pci_error_handlers *err_handler;
291 
292  device_lock(&dev->dev);
293  dev->error_state = pci_channel_io_normal;
294 
295  if (!dev->driver ||
296  !dev->driver->err_handler ||
297  !dev->driver->err_handler->resume)
298  goto out;
299 
300  err_handler = dev->driver->err_handler;
301  err_handler->resume(dev);
302 out:
303  device_unlock(&dev->dev);
304  return 0;
305 }
306 
318 static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
319  enum pci_channel_state state,
320  char *error_mesg,
321  int (*cb)(struct pci_dev *, void *))
322 {
323  struct aer_broadcast_data result_data;
324 
325  dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
326  result_data.state = state;
327  if (cb == report_error_detected)
328  result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
329  else
330  result_data.result = PCI_ERS_RESULT_RECOVERED;
331 
332  if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
333  /*
334  * If the error is reported by a bridge, we think this error
335  * is related to the downstream link of the bridge, so we
336  * do error recovery on all subordinates of the bridge instead
337  * of the bridge and clear the error status of the bridge.
338  */
339  if (cb == report_error_detected)
340  dev->error_state = state;
341  pci_walk_bus(dev->subordinate, cb, &result_data);
342  if (cb == report_resume) {
343  pci_cleanup_aer_uncorrect_error_status(dev);
344  dev->error_state = pci_channel_io_normal;
345  }
346  } else {
347  /*
348  * If the error is reported by an end point, we think this
349  * error is related to the upstream link of the end point.
350  */
351  pci_walk_bus(dev->bus, cb, &result_data);
352  }
353 
354  return result_data.result;
355 }
356 
363 void aer_do_secondary_bus_reset(struct pci_dev *dev)
364 {
365  u16 p2p_ctrl;
366 
367  /* Assert Secondary Bus Reset */
368  pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
369  p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
370  pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
371 
372  /*
373  * we should send the hot reset message for 2ms to allow it time to
374  * propagate to all downstream ports
375  */
376  msleep(2);
377 
378  /* De-assert Secondary Bus Reset */
379  p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
380  pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
381 
382  /*
383  * System software must wait for at least 100ms from the end
384  * of a reset of one or more devices before it is permitted
385  * to issue Configuration Requests to those devices.
386  */
387  msleep(200);
388 }
389 
396 static pci_ers_result_t default_downstream_reset_link(struct pci_dev *dev)
397 {
398  aer_do_secondary_bus_reset(dev);
399  dev_printk(KERN_DEBUG, &dev->dev,
400  "Downstream Port link has been reset\n");
401  return PCI_ERS_RESULT_RECOVERED;
402 }
403 
404 static int find_aer_service_iter(struct device *device, void *data)
405 {
406  struct pcie_port_service_driver *service_driver, **drv;
407 
408  drv = (struct pcie_port_service_driver **) data;
409 
410  if (device->bus == &pcie_port_bus_type && device->driver) {
411  service_driver = to_service_driver(device->driver);
412  if (service_driver->service == PCIE_PORT_SERVICE_AER) {
413  *drv = service_driver;
414  return 1;
415  }
416  }
417 
418  return 0;
419 }
420 
421 static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
422 {
423  struct pcie_port_service_driver *drv = NULL;
424 
425  device_for_each_child(&dev->dev, &drv, find_aer_service_iter);
426 
427  return drv;
428 }
429 
430 static pci_ers_result_t reset_link(struct pci_dev *dev)
431 {
432  struct pci_dev *udev;
433  pci_ers_result_t status;
434  struct pcie_port_service_driver *driver;
435 
436  if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
437  /* Reset this port for all subordinates */
438  udev = dev;
439  } else {
440  /* Reset the upstream component (likely downstream port) */
441  udev = dev->bus->self;
442  }
443 
444  /* Use the AER driver of the component first */
445  driver = find_aer_service(udev);
446 
447  if (driver && driver->reset_link) {
448  status = driver->reset_link(udev);
449  } else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM) {
450  status = default_downstream_reset_link(udev);
451  } else {
452  dev_printk(KERN_DEBUG, &dev->dev,
453  "no link-reset support at upstream device %s\n",
454  pci_name(udev));
455  return PCI_ERS_RESULT_DISCONNECT;
456  }
457 
458  if (status != PCI_ERS_RESULT_RECOVERED) {
459  dev_printk(KERN_DEBUG, &dev->dev,
460  "link reset at upstream device %s failed\n",
461  pci_name(udev));
462  return PCI_ERS_RESULT_DISCONNECT;
463  }
464 
465  return status;
466 }
467 
477 static void do_recovery(struct pci_dev *dev, int severity)
478 {
479  pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
480  enum pci_channel_state state;
481 
482  if (severity == AER_FATAL)
483  state = pci_channel_io_frozen;
484  else
485  state = pci_channel_io_normal;
486 
487  status = broadcast_error_message(dev,
488  state,
489  "error_detected",
490  report_error_detected);
491 
492  if (severity == AER_FATAL) {
493  result = reset_link(dev);
494  if (result != PCI_ERS_RESULT_RECOVERED)
495  goto failed;
496  }
497 
498  if (status == PCI_ERS_RESULT_CAN_RECOVER)
499  status = broadcast_error_message(dev,
500  state,
501  "mmio_enabled",
502  report_mmio_enabled);
503 
504  if (status == PCI_ERS_RESULT_NEED_RESET) {
505  /*
506  * TODO: Should call platform-specific
507  * functions to reset slot before calling
508  * drivers' slot_reset callbacks?
509  */
510  status = broadcast_error_message(dev,
511  state,
512  "slot_reset",
513  report_slot_reset);
514  }
515 
516  if (status != PCI_ERS_RESULT_RECOVERED)
517  goto failed;
518 
519  broadcast_error_message(dev,
520  state,
521  "resume",
522  report_resume);
523 
524  dev_info(&dev->dev, "AER: Device recovery successful\n");
525  return;
526 
527 failed:
528  /* TODO: Should kernel panic here? */
529  dev_info(&dev->dev, "AER: Device recovery failed\n");
530 }
531 
540 static void handle_error_source(struct pcie_device *aerdev,
541  struct pci_dev *dev,
542  struct aer_err_info *info)
543 {
544  int pos;
545 
546  if (info->severity == AER_CORRECTABLE) {
547  /*
548  * Correctable error does not need software intervention.
549  * No need to go through error recovery process.
550  */
551  pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
552  if (pos)
553  pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
554  info->status);
555  } else
556  do_recovery(dev, info->severity);
557 }
558 
559 #ifdef CONFIG_ACPI_APEI_PCIEAER
560 static void aer_recover_work_func(struct work_struct *work);
561 
562 #define AER_RECOVER_RING_ORDER 4
563 #define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
564 
565 struct aer_recover_entry
566 {
567  u8 bus;
568  u8 devfn;
569  u16 domain;
570  int severity;
571 };
572 
573 static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
574  AER_RECOVER_RING_SIZE);
575 /*
576  * Mutual exclusion for writers of aer_recover_ring; the reader side
577  * doesn't need a lock, because there is only one reader and no lock
578  * is needed between the reader and the writer.
579  */
580 static DEFINE_SPINLOCK(aer_recover_ring_lock);
581 static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
582 
583 void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
584  int severity)
585 {
586  unsigned long flags;
587  struct aer_recover_entry entry = {
588  .bus = bus,
589  .devfn = devfn,
590  .domain = domain,
591  .severity = severity,
592  };
593 
594  spin_lock_irqsave(&aer_recover_ring_lock, flags);
595  if (kfifo_put(&aer_recover_ring, &entry))
596  schedule_work(&aer_recover_work);
597  else
598  pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
599  domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
600  spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
601 }
602 EXPORT_SYMBOL_GPL(aer_recover_queue);
603 
604 static void aer_recover_work_func(struct work_struct *work)
605 {
606  struct aer_recover_entry entry;
607  struct pci_dev *pdev;
608 
609  while (kfifo_get(&aer_recover_ring, &entry)) {
610  pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
611  entry.devfn);
612  if (!pdev) {
613  pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
614  entry.domain, entry.bus,
615  PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
616  continue;
617  }
618  do_recovery(pdev, entry.severity);
619  }
620 }
621 #endif
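aer_recover_queue() above is the entry point the firmware-first (APEI/GHES) path uses to feed errors into the same do_recovery() machinery. A hedged illustration of a caller, assuming CONFIG_ACPI_APEI_PCIEAER; the function name and the bus/device values are made up:

#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/cper.h>

static void example_firmware_first_report(void)
{
	/* Severity as translated from a CPER record, e.g. recoverable. */
	int severity = cper_severity_to_aer(CPER_SEV_RECOVERABLE);

	/* Domain 0, bus 0x03, device 0, function 1. */
	aer_recover_queue(0, 0x03, PCI_DEVFN(0, 1), severity);
}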
622 
632 static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
633 {
634  int pos, temp;
635 
636  /* Must reset in this function */
637  info->status = 0;
638  info->tlp_header_valid = 0;
639 
640  pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
641 
642  /* The device might not support AER */
643  if (!pos)
644  return 1;
645 
646  if (info->severity == AER_CORRECTABLE) {
647  pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
648  &info->status);
649  pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
650  &info->mask);
651  if (!(info->status & ~info->mask))
652  return 0;
653  } else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
654  info->severity == AER_NONFATAL) {
655 
656  /* Link is still healthy for IO reads */
657  pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
658  &info->status);
659  pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
660  &info->mask);
661  if (!(info->status & ~info->mask))
662  return 0;
663 
664  /* Get First Error Pointer */
665  pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
666  info->first_error = PCI_ERR_CAP_FEP(temp);
667 
668  if (info->status & AER_LOG_TLP_MASKS) {
669  info->tlp_header_valid = 1;
670  pci_read_config_dword(dev,
671  pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
672  pci_read_config_dword(dev,
673  pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
674  pci_read_config_dword(dev,
675  pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
676  pci_read_config_dword(dev,
677  pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
678  }
679  }
680 
681  return 1;
682 }
683 
684 static inline void aer_process_err_devices(struct pcie_device *p_device,
685  struct aer_err_info *e_info)
686 {
687  int i;
688 
689  /* Report all errors before handling them, so that records are not lost to resets etc. */
690  for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
691  if (get_device_error_info(e_info->dev[i], e_info))
692  aer_print_error(e_info->dev[i], e_info);
693  }
694  for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
695  if (get_device_error_info(e_info->dev[i], e_info))
696  handle_error_source(p_device, e_info->dev[i], e_info);
697  }
698 }
699 
705 static void aer_isr_one_error(struct pcie_device *p_device,
706  struct aer_err_source *e_src)
707 {
708  struct aer_err_info *e_info;
709 
710  /* struct aer_err_info might be big, so we allocate it with slab */
711  e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
712  if (!e_info) {
713  dev_printk(KERN_DEBUG, &p_device->port->dev,
714  "Can't allocate mem when processing AER errors\n");
715  return;
716  }
717 
718  /*
719  * It is possible that both a correctable error and an
720  * uncorrectable error are logged. Report the correctable error first.
721  */
722  if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
723  e_info->id = ERR_COR_ID(e_src->id);
724  e_info->severity = AER_CORRECTABLE;
725 
726  if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
727  e_info->multi_error_valid = 1;
728  else
729  e_info->multi_error_valid = 0;
730 
731  aer_print_port_info(p_device->port, e_info);
732 
733  if (find_source_device(p_device->port, e_info))
734  aer_process_err_devices(p_device, e_info);
735  }
736 
737  if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
738  e_info->id = ERR_UNCOR_ID(e_src->id);
739 
740  if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
741  e_info->severity = AER_FATAL;
742  else
743  e_info->severity = AER_NONFATAL;
744 
745  if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
746  e_info->multi_error_valid = 1;
747  else
748  e_info->multi_error_valid = 0;
749 
750  aer_print_port_info(p_device->port, e_info);
751 
752  if (find_source_device(p_device->port, e_info))
753  aer_process_err_devices(p_device, e_info);
754  }
755 
756  kfree(e_info);
757 }
758 
768 static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
769 {
770  unsigned long flags;
771 
772  /* Lock access to Root error producer/consumer index */
773  spin_lock_irqsave(&rpc->e_lock, flags);
774  if (rpc->prod_idx == rpc->cons_idx) {
775  spin_unlock_irqrestore(&rpc->e_lock, flags);
776  return 0;
777  }
778 
779  *e_src = rpc->e_sources[rpc->cons_idx];
780  rpc->cons_idx++;
781  if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
782  rpc->cons_idx = 0;
783  spin_unlock_irqrestore(&rpc->e_lock, flags);
784 
785  return 1;
786 }
787 
794 void aer_isr(struct work_struct *work)
795 {
796  struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
797  struct pcie_device *p_device = rpc->rpd;
798  struct aer_err_source uninitialized_var(e_src);
799 
800  mutex_lock(&rpc->rpc_mutex);
801  while (get_e_source(rpc, &e_src))
802  aer_isr_one_error(p_device, &e_src);
803  mutex_unlock(&rpc->rpc_mutex);
804 
805  wake_up(&rpc->wait_release);
806 }
807 
814 int aer_init(struct pcie_device *dev)
815 {
816  if (forceload) {
817  dev_printk(KERN_DEBUG, &dev->device,
818  "aerdrv forceload requested.\n");
819  pcie_aer_force_firmware_first(dev->port, 0);
820  }
821  return 0;
822 }