Linux Kernel 3.7.1
irq_32.c
/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);
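/*
 * Note: irq_stat holds this CPU's interrupt state and statistics
 * (including the pending-softirq mask), consumed by the generic irq
 * and softirq code; irq_regs caches a pointer to the register frame of
 * the interrupt currently being handled, retrieved elsewhere via
 * get_irq_regs() (see asm-generic/irq_regs.h).
 */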
#ifdef CONFIG_DEBUG_STACKOVERFLOW

int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}
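/*
 * The asm above masks %esp with (THREAD_SIZE - 1), yielding the stack
 * pointer's byte offset within the THREAD_SIZE-aligned kernel stack.
 * Since the x86 stack grows downward and struct thread_info lives at
 * the lowest addresses of the stack area, an offset below
 * sizeof(struct thread_info) + STACK_WARN means less than STACK_WARN
 * bytes of usable stack remain.
 */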

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info      tinfo;
	u32                     stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(THREAD_SIZE)));
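/*
 * Each irq_ctx mirrors the layout of an ordinary task stack: the
 * thread_info sits at the lowest addresses and the stack grows down
 * from the top of the same THREAD_SIZE-aligned block. That is what
 * lets current_thread_info(), which computes esp & ~(THREAD_SIZE - 1),
 * keep working while we run on an interrupt stack.
 */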

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D" (func)
		     : "memory", "cc", "edx", "ecx", "eax");
}
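/*
 * call_on_stack(): the xchgl swaps %esp with the new stack pointer held
 * in %ebx, the call runs func on that stack, and the movl restores the
 * original %esp afterwards. %ebx works as the save slot because it is
 * callee-saved, so func must preserve it; %eax, %ecx and %edx are
 * listed as clobbers since func may freely modify them.
 */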

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = __this_cpu_read(hardirq_ctx);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curctx == irqctx))
		return 0;

	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
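	/* isp now points one past the highest address of the irq_ctx;
	 * the handler's frames will grow downward from there. */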
	irqctx->tinfo.task = curctx->tinfo.task;
	irqctx->tinfo.previous_esp = current_stack_pointer;

	/* Copy the preempt_count so that the [soft]irq checks work. */
	irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     :  "0" (irq),   "1" (desc),  "2" (isp),
			"D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}
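/*
 * The asm in execute_on_irq_stack() is call_on_stack() plus argument
 * setup: the "0" (irq) and "1" (desc) constraints load irq into %eax
 * and desc into %edx, which are the first two parameter registers
 * under the -mregparm=3 convention the 32-bit kernel is compiled with,
 * so desc->handle_irq() receives (irq, desc). A return of 0 tells the
 * caller we were already on the IRQ stack and it should invoke the
 * handler directly on the current stack.
 */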

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (per_cpu(hardirq_ctx, cpu))
		return;

	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP,
					       THREAD_SIZE_ORDER));
	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(hardirq_ctx, cpu) = irqctx;

	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP,
					       THREAD_SIZE_ORDER));
	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(softirq_ctx, cpu) = irqctx;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}
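/*
 * Each context is a THREAD_SIZE allocation (THREAD_SIZE_ORDER pages,
 * with the same THREADINFO_GFP flags used for task stacks). The
 * hardirq context is seeded with preempt_count = HARDIRQ_OFFSET so it
 * starts out marked as hardirq context (execute_on_irq_stack() then
 * copies the interrupted task's preempt_count on each entry), and
 * addr_limit is set to MAKE_MM_SEG(0) so that any stray user-space
 * access from interrupt context fails its access_ok() check.
 */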

asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = __this_cpu_read(softirq_ctx);
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}
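/*
 * x86-32 provides its own do_softirq() (__ARCH_HAS_DO_SOFTIRQ) so that
 * pending softirqs run on the dedicated softirq stack. Interrupts stay
 * disabled between the local_softirq_pending() check and the stack
 * switch so no softirq can be raised in that window undetected;
 * __do_softirq() itself re-enables interrupts while it runs the
 * handlers.
 */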

bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *desc;
	int overflow;

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
		return false;

	if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	return true;
}
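/*
 * For context (a sketch from memory, not part of this file): in this
 * release handle_irq() is called from the common x86 do_IRQ() in
 * arch/x86/kernel/irq.c, after the vector-to-irq lookup, roughly:
 *
 *	unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 *	{
 *		struct pt_regs *old_regs = set_irq_regs(regs);
 *		unsigned irq = __this_cpu_read(vector_irq[~regs->orig_ax]);
 *
 *		irq_enter();
 *		if (!handle_irq(irq, regs))
 *			;	// report the spurious interrupt
 *		irq_exit();
 *
 *		set_irq_regs(old_regs);
 *		return 1;
 *	}
 *
 * The user_mode_vm(regs) test above skips the stack switch when the
 * interrupt arrived from user space: the kernel half of the task stack
 * is empty in that case, so the handler can safely run on it.
 */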