Linux Kernel  3.7.1
chip.c
/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;

        if (!chip)
                chip = &no_irq_chip;

        desc->irq_data.chip = chip;
        irq_put_desc_unlock(desc, flags);
        /*
         * For !CONFIG_SPARSE_IRQ make the irq show up in
         * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
         * already marked, and this call is harmless.
         */
        irq_reserve_irq(irq);
        return 0;
}
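
/*
 * Editor's sketch, not part of chip.c: how a hypothetical controller driver
 * might install its irq_chip. All "foo_*" names are invented; most drivers
 * use the irq_set_chip_and_handler() inline from <linux/irq.h>, which
 * combines irq_set_chip() with __irq_set_handler().
 */
static void foo_irq_mask(struct irq_data *d)
{
        /* mask the line in the (hypothetical) hardware */
}

static void foo_irq_unmask(struct irq_data *d)
{
        /* unmask the line in the (hypothetical) hardware */
}

static struct irq_chip foo_irq_chip = {
        .name           = "FOO",
        .irq_mask       = foo_irq_mask,
        .irq_unmask     = foo_irq_unmask,
};

static void foo_setup_irq(unsigned int irq)
{
        irq_set_chip_and_handler(irq, &foo_irq_chip, handle_level_irq);
}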

int irq_set_irq_type(unsigned int irq, unsigned int type)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
        int ret = 0;

        if (!desc)
                return -EINVAL;

        type &= IRQ_TYPE_SENSE_MASK;
        ret = __irq_set_trigger(desc, irq, type);
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
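
/*
 * Editor's sketch, not part of chip.c: a consumer selecting a rising-edge
 * trigger before requesting the line. The irq number, name and handler are
 * hypothetical; the same effect is more commonly achieved by passing
 * IRQF_TRIGGER_RISING to request_irq().
 */
static int foo_request_edge_irq(unsigned int irq, irq_handler_t handler, void *dev)
{
        int ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);

        if (ret)
                return ret;
        return request_irq(irq, handler, 0, "foo-edge", dev);
}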

int irq_set_handler_data(unsigned int irq, void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;
        desc->irq_data.handler_data = data;
        irq_put_desc_unlock(desc, flags);
        return 0;
}

int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return -EINVAL;
        desc->irq_data.msi_desc = entry;
        if (entry)
                entry->irq = irq;
        irq_put_desc_unlock(desc, flags);
        return 0;
}

int irq_set_chip_data(unsigned int irq, void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;
        desc->irq_data.chip_data = data;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
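
/*
 * Editor's sketch, not part of chip.c: chip_data usually carries the
 * controller driver's per-chip state (for example an mmio base from
 * <linux/io.h>) so that irq_chip callbacks can reach it through
 * irq_data_get_irq_chip_data(). The "bar_gc" structure and the register
 * offset are invented.
 */
struct bar_gc {
        void __iomem *base;                     /* hypothetical mmio base */
};

static void bar_irq_mask(struct irq_data *d)
{
        struct bar_gc *gc = irq_data_get_irq_chip_data(d);

        writel(1 << d->hwirq, gc->base + 0x10); /* hypothetical MASK_SET register */
}

static void bar_wire_up(unsigned int irq, struct bar_gc *gc)
{
        irq_set_chip_data(irq, gc);
}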

struct irq_data *irq_get_irq_data(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? &desc->irq_data : NULL;
}

static void irq_state_clr_disabled(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
        irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
        int ret = 0;

        irq_state_clr_disabled(desc);
        desc->depth = 0;

        if (desc->irq_data.chip->irq_startup) {
                ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
                irq_state_clr_masked(desc);
        } else {
                irq_enable(desc);
        }
        if (resend)
                check_irq_resend(desc, desc->irq_data.irq);
        return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
        irq_state_set_disabled(desc);
        desc->depth = 1;
        if (desc->irq_data.chip->irq_shutdown)
                desc->irq_data.chip->irq_shutdown(&desc->irq_data);
        else if (desc->irq_data.chip->irq_disable)
                desc->irq_data.chip->irq_disable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_mask(&desc->irq_data);
        irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
        irq_state_clr_disabled(desc);
        if (desc->irq_data.chip->irq_enable)
                desc->irq_data.chip->irq_enable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        irq_state_clr_masked(desc);
}

void irq_disable(struct irq_desc *desc)
{
        irq_state_set_disabled(desc);
        if (desc->irq_data.chip->irq_disable) {
                desc->irq_data.chip->irq_disable(&desc->irq_data);
                irq_state_set_masked(desc);
        }
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
        if (desc->irq_data.chip->irq_enable)
                desc->irq_data.chip->irq_enable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
        if (desc->irq_data.chip->irq_disable)
                desc->irq_data.chip->irq_disable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_mask(&desc->irq_data);
        cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask_ack)
                desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
        else {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                if (desc->irq_data.chip->irq_ack)
                        desc->irq_data.chip->irq_ack(&desc->irq_data);
        }
        irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask) {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                irq_state_set_masked(desc);
        }
}

void unmask_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_unmask) {
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
                irq_state_clr_masked(desc);
        }
}
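
/*
 * Editor's sketch, not part of chip.c: mask_ack_irq() above prefers a combined
 * ->irq_mask_ack() callback and only falls back to ->irq_mask() plus
 * ->irq_ack(). A chip whose hardware can do both with one register write can
 * provide the combined callback; "baz_*" names are invented.
 */
static void baz_irq_mask(struct irq_data *d)
{
        /* hypothetical mask register write */
}

static void baz_irq_unmask(struct irq_data *d)
{
        /* hypothetical unmask register write */
}

static void baz_irq_mask_ack(struct irq_data *d)
{
        /* hypothetical: a single register write masks and acks the line */
}

static struct irq_chip baz_irq_chip = {
        .name           = "BAZ",
        .irq_mask       = baz_irq_mask,
        .irq_unmask     = baz_irq_unmask,
        .irq_mask_ack   = baz_irq_mask_ack,
};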

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:        the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        irqreturn_t action_ret;

        might_sleep();

        raw_spin_lock_irq(&desc->lock);

        kstat_incr_irqs_this_cpu(irq, desc);

        action = desc->action;
        if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock_irq(&desc->lock);

        action_ret = action->thread_fn(action->irq, action->dev_id);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock_irq(&desc->lock);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
}
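
/*
 * Editor's sketch, not part of chip.c: handle_nested_irq() is meant to be
 * called from the threaded handler of a slow demux device (for example an
 * I2C GPIO expander) for child interrupts marked with
 * irq_set_nested_thread(). The expander structure, its pending read and all
 * "qux_*" names are invented; the parent line would be requested with
 * request_threaded_irq() so that qux_demux_thread() runs in thread context.
 */
struct qux_expander {
        unsigned int irq_base;
        unsigned int nr_irqs;
};

static unsigned long qux_read_pending(struct qux_expander *qux)
{
        return 0;       /* hypothetical slow bus read of the pending register */
}

static irqreturn_t qux_demux_thread(int irq, void *data)
{
        struct qux_expander *qux = data;
        unsigned long pending = qux_read_pending(qux);
        int bit;

        for_each_set_bit(bit, &pending, qux->nr_irqs)
                handle_nested_irq(qux->irq_base + bit);

        return IRQ_HANDLED;
}

static void qux_setup_child(unsigned int irq)
{
        irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
        irq_set_nested_thread(irq, 1);
}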

static bool irq_check_poll(struct irq_desc *desc)
{
        if (!(desc->istate & IRQS_POLL_INPROGRESS))
                return false;
        return irq_wait_for_poll(desc);
}

void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                if (!irq_check_poll(desc))
                        goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        handle_irq_event(desc);

out_unlock:
        raw_spin_unlock(&desc->lock);
}

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
        /*
         * We need to unmask in the following cases:
         * - Standard level irq (IRQF_ONESHOT is not set)
         * - Oneshot irq which did not wake the thread (caused by a
         *   spurious interrupt or a primary handler handling it
         *   completely).
         */
        if (!irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
                unmask_irq(desc);
}

void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);

        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                if (!irq_check_poll(desc))
                        goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If it's disabled or no action is available,
         * keep it masked and get out of here.
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        handle_irq_event(desc);

        cond_unmask_irq(desc);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
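
/*
 * Editor's sketch, not part of chip.c: the oneshot case that cond_unmask_irq()
 * checks for arises from requests like this one, where a level line must stay
 * masked until the threaded handler has finished. The "quux_*" names are
 * invented; the flow handler installed for the line would be handle_level_irq.
 */
static irqreturn_t quux_thread_fn(int irq, void *dev)
{
        /* talk to the (hypothetical) slow device, then report completion */
        return IRQ_HANDLED;
}

static int quux_request(unsigned int irq, void *dev)
{
        return request_threaded_irq(irq, NULL, quux_thread_fn,
                                    IRQF_ONESHOT | IRQF_TRIGGER_LOW,
                                    "quux", dev);
}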

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
        if (desc->preflow_handler)
                desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                if (!irq_check_poll(desc))
                        goto out;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If it's disabled or no action is available,
         * then mask it and get out of here:
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                mask_irq(desc);
                goto out;
        }

        if (desc->istate & IRQS_ONESHOT)
                mask_irq(desc);

        preflow_handler(desc);
        handle_irq_event(desc);

        if (desc->istate & IRQS_ONESHOT)
                cond_unmask_irq(desc);

out_eoi:
        desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
        raw_spin_unlock(&desc->lock);
        return;
out:
        if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
                goto out_eoi;
        goto out_unlock;
}
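
/*
 * Editor's sketch, not part of chip.c: handle_fasteoi_irq() needs a chip with
 * an ->irq_eoi() callback (plus mask/unmask for the disabled and oneshot
 * paths); GIC-style controllers are the typical users. "corge_*" names are
 * invented.
 */
static void corge_irq_mask(struct irq_data *d)
{
        /* hypothetical mask register write */
}

static void corge_irq_unmask(struct irq_data *d)
{
        /* hypothetical unmask register write */
}

static void corge_irq_eoi(struct irq_data *d)
{
        /* hypothetical end-of-interrupt register write */
}

static struct irq_chip corge_irq_chip = {
        .name           = "CORGE",
        .irq_mask       = corge_irq_mask,
        .irq_unmask     = corge_irq_unmask,
        .irq_eoi        = corge_irq_eoi,
};

static void corge_setup(unsigned int irq)
{
        irq_set_chip_and_handler(irq, &corge_irq_chip, handle_fasteoi_irq);
}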

void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        /*
         * If we're currently running this IRQ, or it's disabled,
         * we shouldn't process the IRQ. Mark it pending, handle
         * the necessary masking and go out.
         */
        if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
                     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
                if (!irq_check_poll(desc)) {
                        desc->istate |= IRQS_PENDING;
                        mask_ack_irq(desc);
                        goto out_unlock;
                }
        }
        kstat_incr_irqs_this_cpu(irq, desc);

        /* Start handling the irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);

        do {
                if (unlikely(!desc->action)) {
                        mask_irq(desc);
                        goto out_unlock;
                }

                /*
                 * When another irq arrived while we were handling
                 * one, we could have masked the irq.
                 * Re-enable it, if it was not disabled in the meantime.
                 */
                if (unlikely(desc->istate & IRQS_PENDING)) {
                        if (!irqd_irq_disabled(&desc->irq_data) &&
                            irqd_irq_masked(&desc->irq_data))
                                unmask_irq(desc);
                }

                handle_irq_event(desc);

        } while ((desc->istate & IRQS_PENDING) &&
                 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
        raw_spin_unlock(&desc->lock);
}
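
/*
 * Editor's sketch, not part of chip.c: handle_edge_irq() acks the chip
 * unconditionally at the start, so the chip must provide ->irq_ack(), and it
 * relies on ->irq_mask()/->irq_unmask() for the pending/replay logic.
 * "grault_*" names are invented.
 */
static void grault_irq_ack(struct irq_data *d)
{
        /* hypothetical: clear the latched edge in hardware */
}

static void grault_irq_mask(struct irq_data *d)
{
        /* hypothetical mask register write */
}

static void grault_irq_unmask(struct irq_data *d)
{
        /* hypothetical unmask register write */
}

static struct irq_chip grault_irq_chip = {
        .name           = "GRAULT",
        .irq_ack        = grault_irq_ack,
        .irq_mask       = grault_irq_mask,
        .irq_unmask     = grault_irq_unmask,
};

static void grault_setup(unsigned int irq)
{
        irq_set_chip_and_handler(irq, &grault_irq_chip, handle_edge_irq);
        irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
}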

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER

void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        raw_spin_lock(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        /*
         * If we're currently running this IRQ, or it's disabled,
         * we shouldn't process the IRQ. Mark it pending, handle
         * the necessary masking and go out.
         */
        if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
                     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
                if (!irq_check_poll(desc)) {
                        desc->istate |= IRQS_PENDING;
                        goto out_eoi;
                }
        }
        kstat_incr_irqs_this_cpu(irq, desc);

        do {
                if (unlikely(!desc->action))
                        goto out_eoi;

                handle_irq_event(desc);

        } while ((desc->istate & IRQS_PENDING) &&
                 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
        chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
#endif

void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        kstat_incr_irqs_this_cpu(irq, desc);

        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        handle_irq_event_percpu(desc, desc->action);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}

void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irqaction *action = desc->action;
        void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
        irqreturn_t res;

        kstat_incr_irqs_this_cpu(irq, desc);

        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        trace_irq_handler_entry(irq, action);
        res = action->handler(irq, dev_id);
        trace_irq_handler_exit(irq, action, res);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}
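
/*
 * Editor's sketch, not part of chip.c: handle_percpu_devid_irq() is selected
 * by irq_set_percpu_devid() and paired with request_percpu_irq(); the handler
 * then receives this CPU's slot of the percpu dev_id, and each CPU enables
 * the line for itself with enable_percpu_irq(). ARM local timers use this
 * pattern; the "garply_*" names are invented.
 */
struct garply_state {
        unsigned long count;
};

static DEFINE_PER_CPU(struct garply_state, garply_states);

static irqreturn_t garply_handler(int irq, void *dev_id)
{
        struct garply_state *st = dev_id;       /* this CPU's instance */

        st->count++;
        return IRQ_HANDLED;
}

static int garply_init(unsigned int irq)
{
        irq_set_percpu_devid(irq);
        return request_percpu_irq(irq, garply_handler, "garply", &garply_states);
}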

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

        if (!desc)
                return;

        if (!handle) {
                handle = handle_bad_irq;
        } else {
                if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
                        goto out;
        }

        /* Uninstall? */
        if (handle == handle_bad_irq) {
                if (desc->irq_data.chip != &no_irq_chip)
                        mask_ack_irq(desc);
                irq_state_set_disabled(desc);
                desc->depth = 1;
        }
        desc->handle_irq = handle;
        desc->name = name;

        if (handle != handle_bad_irq && is_chained) {
                irq_settings_set_noprobe(desc);
                irq_settings_set_norequest(desc);
                irq_settings_set_nothread(desc);
                irq_startup(desc, true);
        }
out:
        irq_put_desc_busunlock(desc, flags);
}

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
                              irq_flow_handler_t handle, const char *name)
{
        irq_set_chip(irq, chip);
        __irq_set_handler(irq, handle, 0, name);
}
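
/*
 * Editor's sketch, not part of chip.c: a cascaded controller installs its
 * demux routine with irq_set_chained_handler() (a wrapper around
 * __irq_set_handler() with is_chained = 1) on the parent line and forwards
 * each pending child with generic_handle_irq(). The pending-register read and
 * "waldo_*" names are invented.
 */
static unsigned int waldo_irq_base;     /* hypothetical base of the child irq range */

static void waldo_demux(unsigned int irq, struct irq_desc *desc)
{
        unsigned long pending = 0;      /* hypothetical: read the child pending bits here */
        int bit;

        for_each_set_bit(bit, &pending, 32)
                generic_handle_irq(waldo_irq_base + bit);
}

static void waldo_setup(unsigned int parent_irq)
{
        irq_set_chained_handler(parent_irq, waldo_demux);
}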

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return;
        irq_settings_clr_and_set(desc, clr, set);

        irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
                   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
        if (irq_settings_has_no_balance_set(desc))
                irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
        if (irq_settings_is_per_cpu(desc))
                irqd_set(&desc->irq_data, IRQD_PER_CPU);
        if (irq_settings_can_move_pcntxt(desc))
                irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
        if (irq_settings_is_level(desc))
                irqd_set(&desc->irq_data, IRQD_LEVEL);

        irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

        irq_put_desc_unlock(desc, flags);
}
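
/*
 * Editor's sketch, not part of chip.c: irq_modify_status() is normally reached
 * through the irq_set_status_flags()/irq_clear_status_flags() inlines from
 * <linux/irq.h>. A hypothetical driver marking a line as unsuitable for
 * autoprobing and affinity balancing might do:
 */
static void fred_mark_special(unsigned int irq)
{
        irq_set_status_flags(irq, IRQ_NOPROBE | IRQ_NO_BALANCING);
}

static void fred_unmark_special(unsigned int irq)
{
        irq_clear_status_flags(irq, IRQ_NOPROBE | IRQ_NO_BALANCING);
}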

void irq_cpu_online(void)
{
        struct irq_desc *desc;
        struct irq_chip *chip;
        unsigned long flags;
        unsigned int irq;

        for_each_active_irq(irq) {
                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                chip = irq_data_get_irq_chip(&desc->irq_data);
                if (chip && chip->irq_cpu_online &&
                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
                     !irqd_irq_disabled(&desc->irq_data)))
                        chip->irq_cpu_online(&desc->irq_data);

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
}

void irq_cpu_offline(void)
{
        struct irq_desc *desc;
        struct irq_chip *chip;
        unsigned long flags;
        unsigned int irq;

        for_each_active_irq(irq) {
                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                chip = irq_data_get_irq_chip(&desc->irq_data);
                if (chip && chip->irq_cpu_offline &&
                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
                     !irqd_irq_disabled(&desc->irq_data)))
                        chip->irq_cpu_offline(&desc->irq_data);

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
}