#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <xen/xen.h>
#include <xen/events.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#define XEN_MCELOG "xen_mcelog: "
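/*
 * This driver runs in dom0: it pulls machine-check error records from the
 * Xen hypervisor via the XEN_MC hypercalls and exposes them to userspace
 * through a /dev/mcelog-compatible character device.
 */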
static struct mc_info g_mi;
static struct mcinfo_logical_cpu *g_physinfo;
static uint32_t ncpus;

static DEFINE_MUTEX(mcelog_lock);

static struct xen_mce_log xen_mcelog = {
	.recordlen = sizeof(struct xen_mce),
};

static DEFINE_SPINLOCK(xen_mce_chrdev_state_lock);
static int xen_mce_chrdev_open_count;	/* #times opened */
static int xen_mce_chrdev_open_exclu;	/* already open exclusive? */

static DECLARE_WAIT_QUEUE_HEAD(xen_mce_chrdev_wait);
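/*
 * Character-device interface: userspace (typically the mcelog daemon)
 * opens /dev/mcelog, polls for data and reads binary struct xen_mce
 * records out of the buffer above.
 */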
static int xen_mce_chrdev_open(struct inode *inode, struct file *file)
{
	spin_lock(&xen_mce_chrdev_state_lock);

	/* refuse the open if the device is already held exclusively */
	if (xen_mce_chrdev_open_exclu ||
	    (xen_mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&xen_mce_chrdev_state_lock);
		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		xen_mce_chrdev_open_exclu = 1;
	xen_mce_chrdev_open_count++;

	spin_unlock(&xen_mce_chrdev_state_lock);
	return nonseekable_open(inode, file);
}
static int xen_mce_chrdev_release(struct inode *inode, struct file *file)
{
	spin_lock(&xen_mce_chrdev_state_lock);

	xen_mce_chrdev_open_count--;
	xen_mce_chrdev_open_exclu = 0;

	spin_unlock(&xen_mce_chrdev_state_lock);
	return 0;
}
static ssize_t xen_mce_chrdev_read(struct file *filp, char __user *ubuf,
				   size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned int num, i;
	int err = 0;

	mutex_lock(&mcelog_lock);
	num = xen_mcelog.next;

	/* copy every logged record to userspace, then reset the log */
	for (i = 0; i < num; i++) {
		err |= copy_to_user(buf, &xen_mcelog.entry[i],
				    sizeof(struct xen_mce));
		buf += sizeof(struct xen_mce);
	}
	xen_mcelog.next = 0;
	if (err)
		err = -EFAULT;

	mutex_unlock(&mcelog_lock);
	return err ? err : buf - ubuf;
}
static unsigned int xen_mce_chrdev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &xen_mce_chrdev_wait, wait);

	if (xen_mcelog.next)
		return POLLIN | POLLRDNORM;

	return 0;
}
static long xen_mce_chrdev_ioctl(struct file *f, unsigned int cmd,
				 unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GETCLEAR_FLAGS: {
		unsigned int flags;

		/* atomically read and clear the accumulated status flags */
		do {
			flags = xen_mcelog.flags;
		} while (cmpxchg(&xen_mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}
static const struct file_operations xen_mce_chrdev_ops = {
	.open		= xen_mce_chrdev_open,
	.release	= xen_mce_chrdev_release,
	.read		= xen_mce_chrdev_read,
	.poll		= xen_mce_chrdev_poll,
	.unlocked_ioctl	= xen_mce_chrdev_ioctl,
};

static struct miscdevice xen_mce_chrdev_device = {
	.minor	= MISC_MCELOG_MINOR,
	.name	= "mcelog",
	.fops	= &xen_mce_chrdev_ops,
};
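/*
 * Userspace consumption sketch (illustration only, not part of the driver;
 * error handling omitted). A reader such as the mcelog daemon would wait
 * for POLLIN and then read the queued records in one call:
 *
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		char buf[4096];
 *		ssize_t n = read(fd, buf, sizeof(buf));
 *		// buf now holds n bytes of struct xen_mce records
 *	}
 */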
/* caller should hold mcelog_lock */
static void xen_mce_log(struct xen_mce *mce)
{
	unsigned int entry = xen_mcelog.next;

	/* when the log is full, flag the overflow and drop the new record */
	if (entry >= XEN_MCE_LOG_LEN) {
		set_bit(XEN_MCE_OVERFLOW,
			(unsigned long *)&xen_mcelog.flags);
		return;
	}

	memcpy(xen_mcelog.entry + entry, mce, sizeof(struct xen_mce));
	xen_mcelog.next++;
}
static int convert_log(struct mc_info *mi)
{
	struct mcinfo_common *mic;
	struct xen_mce m;
	uint32_t i;

	/* ... 'm' and 'mic' are set up from the global mc_info record ... */

	/* match the reporting physical CPU by its APIC id */
	for (i = 0; i < ncpus; i++)
		if (g_physinfo[i].mc_apicid == m.apicid)
			break;
	if (i == ncpus)
		return -ENODEV;

	/* walk the records, stopping at an empty or unrecognised entry */
	do {
		if ((!mic) || (mic->size == 0) ||
		    (mic->type != MC_TYPE_GLOBAL   &&
		     mic->type != MC_TYPE_BANK     &&
		     mic->type != MC_TYPE_EXTENDED &&
		     mic->type != MC_TYPE_RECOVERY))
			break;

		/* ... each MC_TYPE_BANK record is copied into 'm' and
		 * queued with xen_mce_log() ... */

		mic = x86_mcinfo_next(mic);
	} while (1);

	return 0;
}
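/*
 * Records are pulled from the hypervisor with XEN_MC_fetch and must be
 * acknowledged with XEN_MC_ACK afterwards so that Xen can discard them.
 */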
static int mc_queue_handle(uint32_t flags)
{
	struct xen_mc mc_op = {
		.cmd = XEN_MC_fetch,
		.interface_version = XEN_MCA_INTERFACE_VERSION,
	};
	int ret;

	set_xen_guest_handle(mc_op.u.mc_fetch.data, &g_mi);
	do {
		mc_op.u.mc_fetch.flags = flags;
		ret = HYPERVISOR_mca(&mc_op);
		if (ret) {
			pr_err(XEN_MCELOG "Failed to fetch %s error log\n",
			       (flags == XEN_MC_URGENT) ?
			       "urgent" : "nonurgent");
			break;
		}

		/* the queue is drained once no more data comes back */
		if (mc_op.u.mc_fetch.flags &
		    (XEN_MC_NODATA | XEN_MC_FETCHFAILED))
			break;

		ret = convert_log(&g_mi);
		if (ret)
			pr_warn(XEN_MCELOG
				"Failed to convert this error log, "
				"continue acking it anyway\n");

		/* ack the record so the hypervisor can discard it */
		mc_op.u.mc_fetch.flags = flags | XEN_MC_ACK;
		ret = HYPERVISOR_mca(&mc_op);
		if (ret) {
			pr_err(XEN_MCELOG
			       "Failed to ack previous error log\n");
			break;
		}
	} while (1);

	return ret;
}
338 "Failed to handle urgent mc_info queue, "
339 "continue handling nonurgent mc_info queue anyway.\n");
345 "Failed to handle nonurgent mc_info queue.\n");
static int bind_virq_for_mce(void)
{
	int ret;
	struct xen_mc mc_op = {
		.cmd = XEN_MC_physcpuinfo,
		.interface_version = XEN_MCA_INTERFACE_VERSION,
	};

	/* first ask how many physical CPUs there are ... */
	ret = HYPERVISOR_mca(&mc_op);
	if (ret) {
		pr_err(XEN_MCELOG "Failed to get CPU numbers\n");
		return ret;
	}

	/* ... then cache their info, used later to match APIC ids */
	ncpus = mc_op.u.mc_physcpuinfo.ncpus;
	g_physinfo = kcalloc(ncpus, sizeof(*g_physinfo), GFP_KERNEL);
	if (!g_physinfo)
		return -ENOMEM;
	set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
	ret = HYPERVISOR_mca(&mc_op);
	if (ret) {
		pr_err(XEN_MCELOG "Failed to get CPU info\n");
		kfree(g_physinfo);
		return ret;
	}

	/* get notified via VIRQ_MCA whenever new MCE data is available */
	ret = bind_virq_to_irqhandler(VIRQ_MCA, 0,
				      xen_mce_interrupt, 0, "mce", NULL);
	if (ret < 0) {
		pr_err(XEN_MCELOG "Failed to bind virq\n");
		kfree(g_physinfo);
		return ret;
	}

	return 0;
}
static int __init xen_late_init_mcelog(void)
{
	/* only dom0 is responsible for MCE logging */
	if (!xen_initial_domain())
		return -ENODEV;

	/* register the /dev/mcelog character device, then bind the virq */
	if (misc_register(&xen_mce_chrdev_device))
		return -ENODEV;

	return bind_virq_for_mce();
}
device_initcall(xen_late_init_mcelog);