Linux Kernel 3.7.1
mce-severity.c
/*
 * MCE grading rules.
 * Copyright 2008, 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * Author: Andi Kleen
 */
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <asm/mce.h>

#include "mce-internal.h"

/*
 * Grade an MCE by severity. In general the most severe ones are processed
 * first. Since there are quite a lot of combinations, test the bits in a
 * table-driven way. The rules are simply processed in order; the first
 * match wins.
 *
 * Note this is only used for machine check exceptions; corrected
 * errors use much simpler rules. The exceptions still check for corrected
 * errors, but only to leave them alone for the CMCI handler (except for
 * panic situations).
 */
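
/*
 * Each rule below is matched in mce_severity(): a rule applies when
 * (m->status & mask) == result and (m->mcgstatus & mcgmask) == mcgres,
 * when its ser requirement matches whether the CPU supports software
 * error recovery (SER), and when its context (kernel/user) matches
 * where the exception hit. Fields left unset are zero and therefore
 * match anything.
 */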

enum context { IN_KERNEL = 1, IN_USER = 2 };
enum ser { SER_REQUIRED = 1, NO_SER = 2 };

static struct severity {
	u64 mask;
	u64 result;
	unsigned char sev;
	unsigned char mcgmask;
	unsigned char mcgres;
	unsigned char ser;
	unsigned char context;
	unsigned char covered;
	char *msg;
} severities[] = {
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
#define KERNEL .context = IN_KERNEL
#define USER .context = IN_USER
#define SER .ser = SER_REQUIRED
#define NOSER .ser = NO_SER
#define BITCLR(x) .mask = x, .result = 0
#define BITSET(x) .mask = x, .result = x
#define MCGMASK(x, y) .mcgmask = x, .mcgres = y
#define MASK(x, y) .mask = x, .result = y
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)

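	/*
	 * Illustration: an entry such as
	 *	MCESEV(PANIC, "In kernel and no restart IP",
	 *	       KERNEL, MCGMASK(MCG_STATUS_RIPV, 0))
	 * expands through the helper macros above to roughly
	 *	{ .sev = MCE_PANIC_SEVERITY,
	 *	  .msg = "In kernel and no restart IP",
	 *	  .context = IN_KERNEL,
	 *	  .mcgmask = MCG_STATUS_RIPV, .mcgres = 0 },
	 * i.e. the rule fires when RIPV is clear in MCG_STATUS and the
	 * error was taken in kernel context.
	 */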
	MCESEV(
		NO, "Invalid",
		BITCLR(MCI_STATUS_VAL)
		),
	MCESEV(
		NO, "Not enabled",
		BITCLR(MCI_STATUS_EN)
		),
	MCESEV(
		PANIC, "Processor context corrupt",
		BITSET(MCI_STATUS_PCC)
		),
	/* When MCIP is not set something is very confused */
	MCESEV(
		PANIC, "MCIP not set in MCA handler",
		MCGMASK(MCG_STATUS_MCIP, 0)
		),
	/* Neither return nor error IP -- no chance to recover -> PANIC */
	MCESEV(
		PANIC, "Neither restart nor error IP",
		MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0)
		),
	MCESEV(
		PANIC, "In kernel and no restart IP",
		KERNEL, MCGMASK(MCG_STATUS_RIPV, 0)
		),
	MCESEV(
		KEEP, "Corrected error",
		NOSER, BITCLR(MCI_STATUS_UC)
		),

	/* ignore OVER for UCNA */
	MCESEV(
		KEEP, "Uncorrected no action required",
		SER, MASK(MCI_UC_SAR, MCI_STATUS_UC)
		),
	MCESEV(
		PANIC, "Illegal combination (UCNA with AR=1)",
		SER,
		MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
		),
	MCESEV(
		KEEP, "Non signalled machine check",
		SER, BITCLR(MCI_STATUS_S)
		),

	MCESEV(
		PANIC, "Action required with lost events",
		SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR)
		),

	/* known AR MCACODs: */
#ifdef CONFIG_MEMORY_FAILURE
	MCESEV(
		KEEP, "HT thread notices Action required: data load error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
		MCGMASK(MCG_STATUS_EIPV, 0)
		),
	MCESEV(
		AR, "Action required: data load error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
		USER
		),
	MCESEV(
		KEEP, "HT thread notices Action required: instruction fetch error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
		MCGMASK(MCG_STATUS_EIPV, 0)
		),
	MCESEV(
		AR, "Action required: instruction fetch error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
		USER
		),
#endif
	MCESEV(
		PANIC, "Action required: unknown MCACOD",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
		),

	/* known AO MCACODs: */
	MCESEV(
		AO, "Action optional: memory scrubbing error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD_SCRUBMSK, MCI_UC_S|MCACOD_SCRUB)
		),
	MCESEV(
		AO, "Action optional: last level cache writeback error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_S|MCACOD_L3WB)
		),
	MCESEV(
		SOME, "Action optional: unknown MCACOD",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
		),
	MCESEV(
		SOME, "Action optional with lost events",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S)
		),

	MCESEV(
		PANIC, "Overflowed uncorrected",
		BITSET(MCI_STATUS_OVER|MCI_STATUS_UC)
		),
	MCESEV(
		UC, "Uncorrected",
		BITSET(MCI_STATUS_UC)
		),
	MCESEV(
		SOME, "No match",
		BITSET(0)
		)	/* always matches. keep at end */
};

/*
 * If mcgstatus indicated that ip/cs on the stack were
 * no good, then "m->cs" will be zero and we will have
 * to assume the worst case (IN_KERNEL) as we actually
 * have no idea what we were executing when the machine
 * check hit.
 * If we do have a good "m->cs" (or a faked one in the
 * case we were executing in VM86 mode) we can use it to
 * distinguish an exception taken in user mode from one
 * taken in the kernel.
 */
static int error_context(struct mce *m)
{
	return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
}

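/*
 * Walk the severity table in order and return the severity of the first
 * matching rule. *msg (if non-NULL) is pointed at the rule's message and
 * the rule is marked "covered" for the debugfs coverage file. Uncorrected
 * (or worse) errors hit in kernel context are escalated to PANIC when
 * panic_on_oops is set or tolerant < 1.
 */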
int mce_severity(struct mce *m, int tolerant, char **msg)
{
	enum context ctx = error_context(m);
	struct severity *s;

	for (s = severities;; s++) {
		if ((m->status & s->mask) != s->result)
			continue;
		if ((m->mcgstatus & s->mcgmask) != s->mcgres)
			continue;
		if (s->ser == SER_REQUIRED && !mce_ser)
			continue;
		if (s->ser == NO_SER && mce_ser)
			continue;
		if (s->context && ctx != s->context)
			continue;
		if (msg)
			*msg = s->msg;
		s->covered = 1;
		if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) {
			if (panic_on_oops || tolerant < 1)
				return MCE_PANIC_SEVERITY;
		}
		return s->sev;
	}
}

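/*
 * Optional debugfs interface: when CONFIG_DEBUG_FS is enabled, the
 * "severities-coverage" file reports, one line per rule, the rule's
 * covered flag and message, so it is possible to see which grading
 * rules have actually matched. Writing anything to the file clears
 * all covered flags.
 */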
#ifdef CONFIG_DEBUG_FS
static void *s_start(struct seq_file *f, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(severities))
		return NULL;
	return &severities[*pos];
}

static void *s_next(struct seq_file *f, void *data, loff_t *pos)
{
	if (++(*pos) >= ARRAY_SIZE(severities))
		return NULL;
	return &severities[*pos];
}

static void s_stop(struct seq_file *f, void *data)
{
}

static int s_show(struct seq_file *f, void *data)
{
	struct severity *ser = data;

	seq_printf(f, "%d\t%s\n", ser->covered, ser->msg);
	return 0;
}

static const struct seq_operations severities_seq_ops = {
	.start	= s_start,
	.next	= s_next,
	.stop	= s_stop,
	.show	= s_show,
};

static int severities_coverage_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &severities_seq_ops);
}

static ssize_t severities_coverage_write(struct file *file,
					 const char __user *ubuf,
					 size_t count, loff_t *ppos)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(severities); i++)
		severities[i].covered = 0;
	return count;
}

static const struct file_operations severities_coverage_fops = {
	.open		= severities_coverage_open,
	.release	= seq_release,
	.read		= seq_read,
	.write		= severities_coverage_write,
	.llseek		= seq_lseek,
};
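
/*
 * A minimal usage sketch from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug (the file is created under the "mce" debugfs
 * directory returned by mce_get_debugfs_dir()):
 *
 *	cat /sys/kernel/debug/mce/severities-coverage
 *	echo reset > /sys/kernel/debug/mce/severities-coverage
 */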

static int __init severities_debugfs_init(void)
{
	struct dentry *dmce, *fsev;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		goto err_out;

	fsev = debugfs_create_file("severities-coverage", 0444, dmce, NULL,
				   &severities_coverage_fops);
	if (!fsev)
		goto err_out;

	return 0;

err_out:
	return -ENOMEM;
}
late_initcall(severities_debugfs_init);
#endif /* CONFIG_DEBUG_FS */