Linux Kernel 3.7.1
kernel/irq/manage.c
1 /*
2  * linux/kernel/irq/manage.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006 Thomas Gleixner
6  *
7  * This file contains driver APIs to the irq subsystem.
8  */
9 
10 #define pr_fmt(fmt) "genirq: " fmt
11 
12 #include <linux/irq.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/task_work.h>
20 
21 #include "internals.h"
22 
23 #ifdef CONFIG_IRQ_FORCED_THREADING
24 __read_mostly bool force_irqthreads;
25 
26 static int __init setup_forced_irqthreads(char *arg)
27 {
28  force_irqthreads = true;
29  return 0;
30 }
31 early_param("threadirqs", setup_forced_irqthreads);
32 #endif
33 
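/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * Waits for any pending hard IRQ handlers and threaded handlers for this
 * interrupt to complete before returning. Calling this while holding a
 * resource the IRQ handler may need can deadlock.
 *
 * May be called, with care, from IRQ context.
 */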
44 void synchronize_irq(unsigned int irq)
45 {
46  struct irq_desc *desc = irq_to_desc(irq);
47  bool inprogress;
48 
49  if (!desc)
50  return;
51 
52  do {
53  unsigned long flags;
54 
55  /*
56  * Wait until we're out of the critical section. This might
57  * give the wrong answer due to the lack of memory barriers.
58  */
59  while (irqd_irq_inprogress(&desc->irq_data))
60  cpu_relax();
61 
62  /* Ok, that indicated we're done: double-check carefully. */
63  raw_spin_lock_irqsave(&desc->lock, flags);
64  inprogress = irqd_irq_inprogress(&desc->irq_data);
65  raw_spin_unlock_irqrestore(&desc->lock, flags);
66 
67  /* Oops, that failed? */
68  } while (inprogress);
69 
70  /*
71  * We made sure that no hardirq handler is running. Now verify
72  * that no threaded handlers are active.
73  */
74  wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
75 }
77 
78 #ifdef CONFIG_SMP
79 cpumask_var_t irq_default_affinity;
80 
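/**
 * irq_can_set_affinity - check if the affinity of a given irq can be set
 * @irq: interrupt to check
 *
 * Returns 1 if the interrupt can be balanced and its chip provides an
 * irq_set_affinity() callback, 0 otherwise.
 */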
86 int irq_can_set_affinity(unsigned int irq)
87 {
88  struct irq_desc *desc = irq_to_desc(irq);
89 
90  if (!desc || !irqd_can_balance(&desc->irq_data) ||
91  !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
92  return 0;
93 
94  return 1;
95 }
96 
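/**
 * irq_set_thread_affinity - notify irq threads to adjust affinity
 * @desc: irq descriptor whose affinity changed
 *
 * Only sets IRQTF_AFFINITY and delegates the actual affinity change to
 * the interrupt thread itself; set_cpus_allowed_ptr() cannot be called
 * here because desc->lock is held and this can run in hard irq context.
 */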
106 void irq_set_thread_affinity(struct irq_desc *desc)
107 {
108  struct irqaction *action = desc->action;
109 
110  while (action) {
111  if (action->thread)
112  set_bit(IRQTF_AFFINITY, &action->thread_flags);
113  action = action->next;
114  }
115 }
116 
117 #ifdef CONFIG_GENERIC_PENDING_IRQ
118 static inline bool irq_can_move_pcntxt(struct irq_data *data)
119 {
120  return irqd_can_move_in_process_context(data);
121 }
122 static inline bool irq_move_pending(struct irq_data *data)
123 {
124  return irqd_is_setaffinity_pending(data);
125 }
126 static inline void
127 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
128 {
129  cpumask_copy(desc->pending_mask, mask);
130 }
131 static inline void
132 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
133 {
134  cpumask_copy(mask, desc->pending_mask);
135 }
136 #else
137 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
138 static inline bool irq_move_pending(struct irq_data *data) { return false; }
139 static inline void
140 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
141 static inline void
142 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
143 #endif
144 
145 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
146  bool force)
147 {
148  struct irq_desc *desc = irq_data_to_desc(data);
149  struct irq_chip *chip = irq_data_get_irq_chip(data);
150  int ret;
151 
152  ret = chip->irq_set_affinity(data, mask, false);
153  switch (ret) {
154  case IRQ_SET_MASK_OK:
155  cpumask_copy(data->affinity, mask);
156  case IRQ_SET_MASK_OK_NOCOPY:
157  irq_set_thread_affinity(desc);
158  ret = 0;
159  }
160 
161  return ret;
162 }
163 
164 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
165 {
166  struct irq_chip *chip = irq_data_get_irq_chip(data);
167  struct irq_desc *desc = irq_data_to_desc(data);
168  int ret = 0;
169 
170  if (!chip || !chip->irq_set_affinity)
171  return -EINVAL;
172 
173  if (irq_can_move_pcntxt(data)) {
174  ret = irq_do_set_affinity(data, mask, false);
175  } else {
176  irqd_set_move_pending(data);
177  irq_copy_pending(desc, mask);
178  }
179 
180  if (desc->affinity_notify) {
181  kref_get(&desc->affinity_notify->kref);
182  schedule_work(&desc->affinity_notify->work);
183  }
184  irqd_set(data, IRQD_AFFINITY_SET);
185 
186  return ret;
187 }
188 
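/**
 * irq_set_affinity - set the CPU affinity of a given irq
 * @irq: interrupt to set the affinity for
 * @mask: cpumask of allowed target CPUs
 *
 * Applies the mask immediately if the irq can be moved in process
 * context, otherwise marks the affinity change as pending.
 */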
195 int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
196 {
197  struct irq_desc *desc = irq_to_desc(irq);
198  unsigned long flags;
199  int ret;
200 
201  if (!desc)
202  return -EINVAL;
203 
204  raw_spin_lock_irqsave(&desc->lock, flags);
205  ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
206  raw_spin_unlock_irqrestore(&desc->lock, flags);
207  return ret;
208 }
209 
210 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
211 {
212  unsigned long flags;
213  struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
214 
215  if (!desc)
216  return -EINVAL;
217  desc->affinity_hint = m;
218  irq_put_desc_unlock(desc, flags);
219  return 0;
220 }
221 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
222 
223 static void irq_affinity_notify(struct work_struct *work)
224 {
225  struct irq_affinity_notify *notify =
226  container_of(work, struct irq_affinity_notify, work);
227  struct irq_desc *desc = irq_to_desc(notify->irq);
228  cpumask_var_t cpumask;
229  unsigned long flags;
230 
231  if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
232  goto out;
233 
234  raw_spin_lock_irqsave(&desc->lock, flags);
235  if (irq_move_pending(&desc->irq_data))
236  irq_get_pending(cpumask, desc);
237  else
238  cpumask_copy(cpumask, desc->irq_data.affinity);
239  raw_spin_unlock_irqrestore(&desc->lock, flags);
240 
241  notify->notify(notify, cpumask);
242 
243  free_cpumask_var(cpumask);
244 out:
245  kref_put(&notify->kref, notify->release);
246 }
247 
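/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: interrupt for which to enable/disable notification
 * @notify: context for notification, or NULL to disable notification.
 *          Function pointers must be initialised; the other fields are
 *          initialised here.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */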
259 int
260 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
261 {
262  struct irq_desc *desc = irq_to_desc(irq);
263  struct irq_affinity_notify *old_notify;
264  unsigned long flags;
265 
266  /* The release function is promised process context */
267  might_sleep();
268 
269  if (!desc)
270  return -EINVAL;
271 
272  /* Complete initialisation of *notify */
273  if (notify) {
274  notify->irq = irq;
275  kref_init(&notify->kref);
276  INIT_WORK(&notify->work, irq_affinity_notify);
277  }
278 
279  raw_spin_lock_irqsave(&desc->lock, flags);
280  old_notify = desc->affinity_notify;
281  desc->affinity_notify = notify;
282  raw_spin_unlock_irqrestore(&desc->lock, flags);
283 
284  if (old_notify)
285  kref_put(&old_notify->kref, old_notify->release);
286 
287  return 0;
288 }
289 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
290 
291 #ifndef CONFIG_AUTO_IRQ_AFFINITY
292 /*
293  * Generic version of the affinity autoselector.
294  */
295 static int
296 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
297 {
298  struct cpumask *set = irq_default_affinity;
299  int node = desc->irq_data.node;
300 
301  /* Excludes PER_CPU and NO_BALANCE interrupts */
302  if (!irq_can_set_affinity(irq))
303  return 0;
304 
305  /*
306  * Preserve a userspace affinity setup, but make sure that
307  * one of the targets is online.
308  */
309  if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
310  if (cpumask_intersects(desc->irq_data.affinity,
311  cpu_online_mask))
312  set = desc->irq_data.affinity;
313  else
314  irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
315  }
316 
317  cpumask_and(mask, cpu_online_mask, set);
318  if (node != NUMA_NO_NODE) {
319  const struct cpumask *nodemask = cpumask_of_node(node);
320 
321  /* make sure at least one of the cpus in nodemask is online */
322  if (cpumask_intersects(mask, nodemask))
323  cpumask_and(mask, mask, nodemask);
324  }
325  irq_do_set_affinity(&desc->irq_data, mask, false);
326  return 0;
327 }
328 #else
329 static inline int
330 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
331 {
332  return irq_select_affinity(irq);
333 }
334 #endif
335 
336 /*
337  * Called when affinity is set via /proc/irq
338  */
339 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
340 {
341  struct irq_desc *desc = irq_to_desc(irq);
342  unsigned long flags;
343  int ret;
344 
345  raw_spin_lock_irqsave(&desc->lock, flags);
346  ret = setup_affinity(irq, desc, mask);
347  raw_spin_unlock_irqrestore(&desc->lock, flags);
348  return ret;
349 }
350 
351 #else
352 static inline int
353 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
354 {
355  return 0;
356 }
357 #endif
358 
359 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
360 {
361  if (suspend) {
362  if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
363  return;
364  desc->istate |= IRQS_SUSPENDED;
365  }
366 
367  if (!desc->depth++)
368  irq_disable(desc);
369 }
370 
371 static int __disable_irq_nosync(unsigned int irq)
372 {
373  unsigned long flags;
374  struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
375 
376  if (!desc)
377  return -EINVAL;
378  __disable_irq(desc, irq, false);
379  irq_put_desc_busunlock(desc, flags);
380  return 0;
381 }
382 
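/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: interrupt to disable
 *
 * Disables the selected interrupt line. Disables and enables are nested.
 * Unlike disable_irq(), this does not ensure that running instances of
 * the IRQ handler have completed before returning.
 *
 * May be called from IRQ context.
 */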
394 void disable_irq_nosync(unsigned int irq)
395 {
396  __disable_irq_nosync(irq);
397 }
399 
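/**
 * disable_irq - disable an irq and wait for completion
 * @irq: interrupt to disable
 *
 * Disables the selected interrupt line; disables and enables are nested.
 * Waits for any pending IRQ handlers for this interrupt to complete
 * before returning, so calling it while holding a resource the handler
 * needs will deadlock.
 *
 * May be called, with care, from IRQ context.
 */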
412 void disable_irq(unsigned int irq)
413 {
414  if (!__disable_irq_nosync(irq))
415  synchronize_irq(irq);
416 }
418 
419 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
420 {
421  if (resume) {
422  if (!(desc->istate & IRQS_SUSPENDED)) {
423  if (!desc->action)
424  return;
425  if (!(desc->action->flags & IRQF_FORCE_RESUME))
426  return;
427  /* Pretend that it got disabled ! */
428  desc->depth++;
429  }
430  desc->istate &= ~IRQS_SUSPENDED;
431  }
432 
433  switch (desc->depth) {
434  case 0:
435  err_out:
436  WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
437  break;
438  case 1: {
439  if (desc->istate & IRQS_SUSPENDED)
440  goto err_out;
441  /* Prevent probing on this irq: */
442  irq_settings_set_noprobe(desc);
443  irq_enable(desc);
444  check_irq_resend(desc, irq);
445  /* fall-through */
446  }
447  default:
448  desc->depth--;
449  }
450 }
451 
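/**
 * enable_irq - enable handling of an irq
 * @irq: interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this matches the
 * last disable, processing is enabled again.
 *
 * May be called from IRQ context only when the chip's bus_lock and
 * bus_sync_unlock callbacks are NULL.
 */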
463 void enable_irq(unsigned int irq)
464 {
465  unsigned long flags;
466  struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
467 
468  if (!desc)
469  return;
470  if (WARN(!desc->irq_data.chip,
471  KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
472  goto out;
473 
474  __enable_irq(desc, irq, false);
475 out:
476  irq_put_desc_busunlock(desc, flags);
477 }
479 
480 static int set_irq_wake_real(unsigned int irq, unsigned int on)
481 {
482  struct irq_desc *desc = irq_to_desc(irq);
483  int ret = -ENXIO;
484 
485  if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
486  return 0;
487 
488  if (desc->irq_data.chip->irq_set_wake)
489  ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
490 
491  return ret;
492 }
493 
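/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enables or disables power management wakeup mode, which is disabled by
 * default. Enables and disables must match, just as they match for
 * non-wakeup mode support. Wakeup mode lets this IRQ wake the system
 * from sleep states like "suspend to RAM".
 */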
506 int irq_set_irq_wake(unsigned int irq, unsigned int on)
507 {
508  unsigned long flags;
509  struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
510  int ret = 0;
511 
512  if (!desc)
513  return -EINVAL;
514 
515  /* wakeup-capable irqs can be shared between drivers that
516  * don't need to have the same sleep mode behaviors.
517  */
518  if (on) {
519  if (desc->wake_depth++ == 0) {
520  ret = set_irq_wake_real(irq, on);
521  if (ret)
522  desc->wake_depth = 0;
523  else
524  irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
525  }
526  } else {
527  if (desc->wake_depth == 0) {
528  WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
529  } else if (--desc->wake_depth == 0) {
530  ret = set_irq_wake_real(irq, on);
531  if (ret)
532  desc->wake_depth = 1;
533  else
534  irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
535  }
536  }
537  irq_put_desc_busunlock(desc, flags);
538  return ret;
539 }
541 
542 /*
543  * Internal function that tells the architecture code whether a
544  * particular irq has been exclusively allocated or is available
545  * for driver use.
546  */
547 int can_request_irq(unsigned int irq, unsigned long irqflags)
548 {
549  unsigned long flags;
550  struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
551  int canrequest = 0;
552 
553  if (!desc)
554  return 0;
555 
556  if (irq_settings_can_request(desc)) {
557  if (desc->action)
558  if (irqflags & desc->action->flags & IRQF_SHARED)
559  canrequest = 1;
560  }
561  irq_put_desc_unlock(desc, flags);
562  return canrequest;
563 }
564 
565 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
566  unsigned long flags)
567 {
568  struct irq_chip *chip = desc->irq_data.chip;
569  int ret, unmask = 0;
570 
571  if (!chip || !chip->irq_set_type) {
572  /*
573  * IRQF_TRIGGER_* but the PIC does not support multiple
574  * flow-types?
575  */
576  pr_debug("No set_type function for IRQ %d (%s)\n", irq,
577  chip ? (chip->name ? : "unknown") : "unknown");
578  return 0;
579  }
580 
581  flags &= IRQ_TYPE_SENSE_MASK;
582 
583  if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
584  if (!irqd_irq_masked(&desc->irq_data))
585  mask_irq(desc);
586  if (!irqd_irq_disabled(&desc->irq_data))
587  unmask = 1;
588  }
589 
590  /* caller masked out all except trigger mode flags */
591  ret = chip->irq_set_type(&desc->irq_data, flags);
592 
593  switch (ret) {
594  case IRQ_SET_MASK_OK:
595  irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
596  irqd_set(&desc->irq_data, flags);
597 
598  case IRQ_SET_MASK_OK_NOCOPY:
599  flags = irqd_get_trigger_type(&desc->irq_data);
600  irq_settings_set_trigger_mask(desc, flags);
601  irqd_clear(&desc->irq_data, IRQD_LEVEL);
602  irq_settings_clr_level(desc);
603  if (flags & IRQ_TYPE_LEVEL_MASK) {
604  irq_settings_set_level(desc);
605  irqd_set(&desc->irq_data, IRQD_LEVEL);
606  }
607 
608  ret = 0;
609  break;
610  default:
611  pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
612  flags, irq, chip->irq_set_type);
613  }
614  if (unmask)
615  unmask_irq(desc);
616  return ret;
617 }
618 
619 /*
620  * Default primary interrupt handler for threaded interrupts. Is
621  * assigned as primary handler when request_threaded_irq is called
622  * with handler == NULL. Useful for oneshot interrupts.
623  */
624 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
625 {
626  return IRQ_WAKE_THREAD;
627 }
628 
629 /*
630  * Primary handler for nested threaded interrupts. Should never be
631  * called.
632  */
633 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
634 {
635  WARN(1, "Primary handler called for nested irq %d\n", irq);
636  return IRQ_NONE;
637 }
638 
639 static int irq_wait_for_interrupt(struct irqaction *action)
640 {
641  set_current_state(TASK_INTERRUPTIBLE);
642 
643  while (!kthread_should_stop()) {
644 
645  if (test_and_clear_bit(IRQTF_RUNTHREAD,
646  &action->thread_flags)) {
647  __set_current_state(TASK_RUNNING);
648  return 0;
649  }
650  schedule();
651  set_current_state(TASK_INTERRUPTIBLE);
652  }
653  __set_current_state(TASK_RUNNING);
654  return -1;
655 }
656 
657 /*
658  * Oneshot interrupts keep the irq line masked until the threaded
659  * handler has finished. Unmask it if the interrupt has not been disabled
660  * and is marked MASKED.
661  */
662 static void irq_finalize_oneshot(struct irq_desc *desc,
663  struct irqaction *action)
664 {
665  if (!(desc->istate & IRQS_ONESHOT))
666  return;
667 again:
668  chip_bus_lock(desc);
669  raw_spin_lock_irq(&desc->lock);
670 
671  /*
672  * Implausible though it may be, we need to protect ourselves against
673  * the following scenario:
674  *
675  * The thread may finish before the hard interrupt handler
676  * on the other CPU. If we unmask the irq line then the
677  * interrupt can come in again, mask the line and leave due
678  * to IRQS_INPROGRESS, and the irq line stays masked forever.
679  *
680  * This also serializes the state of shared oneshot handlers
681  * versus "desc->threads_oneshot |= action->thread_mask;" in
682  * irq_wake_thread(). See the comment there which explains the
683  * serialization.
684  */
685  if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
686  raw_spin_unlock_irq(&desc->lock);
687  chip_bus_sync_unlock(desc);
688  cpu_relax();
689  goto again;
690  }
691 
692  /*
693  * Now check again, whether the thread should run. Otherwise
694  * we would clear the threads_oneshot bit of this thread which
695  * was just set.
696  */
697  if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
698  goto out_unlock;
699 
700  desc->threads_oneshot &= ~action->thread_mask;
701 
702  if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
703  irqd_irq_masked(&desc->irq_data))
704  unmask_irq(desc);
705 
706 out_unlock:
707  raw_spin_unlock_irq(&desc->lock);
708  chip_bus_sync_unlock(desc);
709 }
710 
711 #ifdef CONFIG_SMP
712 /*
713  * Check whether we need to change the affinity of the interrupt thread.
714  */
715 static void
716 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
717 {
718  cpumask_var_t mask;
719 
720  if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
721  return;
722 
723  /*
724  * In case we are out of memory we set IRQTF_AFFINITY again and
725  * try again next time
726  */
727  if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
728  set_bit(IRQTF_AFFINITY, &action->thread_flags);
729  return;
730  }
731 
732  raw_spin_lock_irq(&desc->lock);
733  cpumask_copy(mask, desc->irq_data.affinity);
734  raw_spin_unlock_irq(&desc->lock);
735 
736  set_cpus_allowed_ptr(current, mask);
737  free_cpumask_var(mask);
738 }
739 #else
740 static inline void
741 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
742 #endif
743 
744 /*
745  * Interrupts which are not explicitly requested as threaded
746  * interrupts rely on the implicit bh/preempt disable of the hard irq
747  * context. So we need to disable bh here to avoid deadlocks and other
748  * side effects.
749  */
750 static irqreturn_t
751 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
752 {
753  irqreturn_t ret;
754 
755  local_bh_disable();
756  ret = action->thread_fn(action->irq, action->dev_id);
757  irq_finalize_oneshot(desc, action);
758  local_bh_enable();
759  return ret;
760 }
761 
762 /*
763  * Interrupts explicitly requested as threaded interrupts want to be
764  * preemptible - many of them need to sleep and wait for slow buses to
765  * complete.
766  */
767 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
768  struct irqaction *action)
769 {
770  irqreturn_t ret;
771 
772  ret = action->thread_fn(action->irq, action->dev_id);
773  irq_finalize_oneshot(desc, action);
774  return ret;
775 }
776 
777 static void wake_threads_waitq(struct irq_desc *desc)
778 {
779  if (atomic_dec_and_test(&desc->threads_active) &&
780  waitqueue_active(&desc->wait_for_threads))
781  wake_up(&desc->wait_for_threads);
782 }
783 
784 static void irq_thread_dtor(struct callback_head *unused)
785 {
786  struct task_struct *tsk = current;
787  struct irq_desc *desc;
788  struct irqaction *action;
789 
790  if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
791  return;
792 
793  action = kthread_data(tsk);
794 
795  pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
796  tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
797 
798 
799  desc = irq_to_desc(action->irq);
800  /*
801  * If IRQTF_RUNTHREAD is set, we need to decrement
802  * desc->threads_active and wake possible waiters.
803  */
804  if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
805  wake_threads_waitq(desc);
806 
807  /* Prevent a stale desc->threads_oneshot */
808  irq_finalize_oneshot(desc, action);
809 }
810 
811 /*
812  * Interrupt handler thread
813  */
814 static int irq_thread(void *data)
815 {
816  struct callback_head on_exit_work;
817  static const struct sched_param param = {
818  .sched_priority = MAX_USER_RT_PRIO/2,
819  };
820  struct irqaction *action = data;
821  struct irq_desc *desc = irq_to_desc(action->irq);
822  irqreturn_t (*handler_fn)(struct irq_desc *desc,
823  struct irqaction *action);
824 
825  if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
826  &action->thread_flags))
827  handler_fn = irq_forced_thread_fn;
828  else
829  handler_fn = irq_thread_fn;
830 
831  sched_setscheduler(current, SCHED_FIFO, &param);
832 
833  init_task_work(&on_exit_work, irq_thread_dtor);
834  task_work_add(current, &on_exit_work, false);
835 
836  while (!irq_wait_for_interrupt(action)) {
837  irqreturn_t action_ret;
838 
839  irq_thread_check_affinity(desc, action);
840 
841  action_ret = handler_fn(desc, action);
842  if (!noirqdebug)
843  note_interrupt(action->irq, desc, action_ret);
844 
845  wake_threads_waitq(desc);
846  }
847 
848  /*
849  * This is the regular exit path. __free_irq() is stopping the
850  * thread via kthread_stop() after calling
851  * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
852  * oneshot mask bit can be set. We cannot verify that as we
853  * cannot touch the oneshot mask at this point anymore as
854  * __setup_irq() might have given out currents thread_mask
855  * again.
856  */
857  task_work_cancel(current, irq_thread_dtor);
858  return 0;
859 }
860 
861 static void irq_setup_forced_threading(struct irqaction *new)
862 {
863  if (!force_irqthreads)
864  return;
865  if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
866  return;
867 
868  new->flags |= IRQF_ONESHOT;
869 
870  if (!new->thread_fn) {
871  set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
872  new->thread_fn = new->handler;
873  new->handler = irq_default_primary_handler;
874  }
875 }
876 
877 /*
878  * Internal function to register an irqaction - typically used to
879  * allocate special interrupts that are part of the architecture.
880  */
881 static int
882 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
883 {
884  struct irqaction *old, **old_ptr;
885  unsigned long flags, thread_mask = 0;
886  int ret, nested, shared = 0;
887  cpumask_var_t mask;
888 
889  if (!desc)
890  return -EINVAL;
891 
892  if (desc->irq_data.chip == &no_irq_chip)
893  return -ENOSYS;
894  if (!try_module_get(desc->owner))
895  return -ENODEV;
896 
897  /*
898  * Check whether the interrupt nests into another interrupt
899  * thread.
900  */
901  nested = irq_settings_is_nested_thread(desc);
902  if (nested) {
903  if (!new->thread_fn) {
904  ret = -EINVAL;
905  goto out_mput;
906  }
907  /*
908  * Replace the primary handler which was provided from
909  * the driver for non nested interrupt handling by the
910  * dummy function which warns when called.
911  */
912  new->handler = irq_nested_primary_handler;
913  } else {
914  if (irq_settings_can_thread(desc))
915  irq_setup_forced_threading(new);
916  }
917 
918  /*
919  * Create a handler thread when a thread function is supplied
920  * and the interrupt does not nest into another interrupt
921  * thread.
922  */
923  if (new->thread_fn && !nested) {
924  struct task_struct *t;
925 
926  t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
927  new->name);
928  if (IS_ERR(t)) {
929  ret = PTR_ERR(t);
930  goto out_mput;
931  }
932  /*
933  * We keep the reference to the task struct even if
934  * the thread dies to avoid that the interrupt code
935  * references an already freed task_struct.
936  */
937  get_task_struct(t);
938  new->thread = t;
939  }
940 
941  if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
942  ret = -ENOMEM;
943  goto out_thread;
944  }
945 
946  /*
947  * Drivers are often written to work w/o knowledge about the
948  * underlying irq chip implementation, so a request for a
949  * threaded irq without a primary hard irq context handler
950  * requires the ONESHOT flag to be set. Some irq chips like
951  * MSI based interrupts are per se one shot safe. Check the
952  * chip flags, so we can avoid the unmask dance at the end of
953  * the threaded handler for those.
954  */
955  if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
956  new->flags &= ~IRQF_ONESHOT;
957 
958  /*
959  * The following block of code has to be executed atomically
960  */
961  raw_spin_lock_irqsave(&desc->lock, flags);
962  old_ptr = &desc->action;
963  old = *old_ptr;
964  if (old) {
965  /*
966  * Can't share interrupts unless both agree to and are
967  * the same type (level, edge, polarity). So both flag
968  * fields must have IRQF_SHARED set and the bits which
969  * set the trigger type must match. Also all must
970  * agree on ONESHOT.
971  */
972  if (!((old->flags & new->flags) & IRQF_SHARED) ||
973  ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
974  ((old->flags ^ new->flags) & IRQF_ONESHOT))
975  goto mismatch;
976 
977  /* All handlers must agree on per-cpuness */
978  if ((old->flags & IRQF_PERCPU) !=
979  (new->flags & IRQF_PERCPU))
980  goto mismatch;
981 
982  /* add new interrupt at end of irq queue */
983  do {
984  /*
985  * Or all existing action->thread_mask bits,
986  * so we can find the next zero bit for this
987  * new action.
988  */
989  thread_mask |= old->thread_mask;
990  old_ptr = &old->next;
991  old = *old_ptr;
992  } while (old);
993  shared = 1;
994  }
995 
996  /*
997  * Setup the thread mask for this irqaction for ONESHOT. For
998  * !ONESHOT irqs the thread mask is 0 so we can avoid a
999  * conditional in irq_wake_thread().
1000  */
1001  if (new->flags & IRQF_ONESHOT) {
1002  /*
1003  * Unlikely to have 32 resp 64 irqs sharing one line,
1004  * but who knows.
1005  */
1006  if (thread_mask == ~0UL) {
1007  ret = -EBUSY;
1008  goto out_mask;
1009  }
1010  /*
1011  * The thread_mask for the action is or'ed to
1012  * desc->threads_active to indicate that the
1013  * IRQF_ONESHOT thread handler has been woken, but not
1014  * yet finished. The bit is cleared when a thread
1015  * completes. When all threads of a shared interrupt
1016  * line have completed desc->threads_active becomes
1017  * zero and the interrupt line is unmasked. See
1018  * handle.c:irq_wake_thread() for further information.
1019  *
1020  * If no thread is woken by primary (hard irq context)
1021  * interrupt handlers, then desc->threads_active is
1022  * also checked for zero to unmask the irq line in the
1023  * affected hard irq flow handlers
1024  * (handle_[fasteoi|level]_irq).
1025  *
1026  * The new action gets the first zero bit of
1027  * thread_mask assigned. See the loop above which or's
1028  * all existing action->thread_mask bits.
1029  */
1030  new->thread_mask = 1 << ffz(thread_mask);
1031 
1032  } else if (new->handler == irq_default_primary_handler &&
1033  !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1034  /*
1035  * The interrupt was requested with handler = NULL, so
1036  * we use the default primary handler for it. But it
1037  * does not have the oneshot flag set. In combination
1038  * with level interrupts this is deadly, because the
1039  * default primary handler just wakes the thread, then
1040  * the irq line is reenabled, but the device still
1041  * has the level irq asserted. Rinse and repeat....
1042  *
1043  * While this works for edge type interrupts, we play
1044  * it safe and reject unconditionally because we can't
1045  * say for sure which type this interrupt really
1046  * has. The type flags are unreliable as the
1047  * underlying chip implementation can override them.
1048  */
1049  pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1050  irq);
1051  ret = -EINVAL;
1052  goto out_mask;
1053  }
1054 
1055  if (!shared) {
1056  init_waitqueue_head(&desc->wait_for_threads);
1057 
1058  /* Setup the type (level, edge polarity) if configured: */
1059  if (new->flags & IRQF_TRIGGER_MASK) {
1060  ret = __irq_set_trigger(desc, irq,
1061  new->flags & IRQF_TRIGGER_MASK);
1062 
1063  if (ret)
1064  goto out_mask;
1065  }
1066 
1067  desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1068  IRQS_ONESHOT | IRQS_WAITING);
1069  irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1070 
1071  if (new->flags & IRQF_PERCPU) {
1072  irqd_set(&desc->irq_data, IRQD_PER_CPU);
1073  irq_settings_set_per_cpu(desc);
1074  }
1075 
1076  if (new->flags & IRQF_ONESHOT)
1077  desc->istate |= IRQS_ONESHOT;
1078 
1079  if (irq_settings_can_autoenable(desc))
1080  irq_startup(desc, true);
1081  else
1082  /* Undo nested disables: */
1083  desc->depth = 1;
1084 
1085  /* Exclude IRQ from balancing if requested */
1086  if (new->flags & IRQF_NOBALANCING) {
1087  irq_settings_set_no_balancing(desc);
1088  irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1089  }
1090 
1091  /* Set default affinity mask once everything is setup */
1092  setup_affinity(irq, desc, mask);
1093 
1094  } else if (new->flags & IRQF_TRIGGER_MASK) {
1095  unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1096  unsigned int omsk = irq_settings_get_trigger_mask(desc);
1097 
1098  if (nmsk != omsk)
1099  /* hope the handler works with current trigger mode */
1100  pr_warning("irq %d uses trigger mode %u; requested %u\n",
1101  irq, omsk, nmsk);
1102  }
1103 
1104  new->irq = irq;
1105  *old_ptr = new;
1106 
1107  /* Reset broken irq detection when installing new handler */
1108  desc->irq_count = 0;
1109  desc->irqs_unhandled = 0;
1110 
1111  /*
1112  * Check whether we disabled the irq via the spurious handler
1113  * before. Reenable it and give it another chance.
1114  */
1115  if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1116  desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1117  __enable_irq(desc, irq, false);
1118  }
1119 
1120  raw_spin_unlock_irqrestore(&desc->lock, flags);
1121 
1122  /*
1123  * Strictly no need to wake it up, but hung_task complains
1124  * when no hard interrupt wakes the thread up.
1125  */
1126  if (new->thread)
1127  wake_up_process(new->thread);
1128 
1129  register_irq_proc(irq, desc);
1130  new->dir = NULL;
1131  register_handler_proc(irq, new);
1132  free_cpumask_var(mask);
1133 
1134  return 0;
1135 
1136 mismatch:
1137  if (!(new->flags & IRQF_PROBE_SHARED)) {
1138  pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1139  irq, new->flags, new->name, old->flags, old->name);
1140 #ifdef CONFIG_DEBUG_SHIRQ
1141  dump_stack();
1142 #endif
1143  }
1144  ret = -EBUSY;
1145 
1146 out_mask:
1147  raw_spin_unlock_irqrestore(&desc->lock, flags);
1148  free_cpumask_var(mask);
1149 
1150 out_thread:
1151  if (new->thread) {
1152  struct task_struct *t = new->thread;
1153 
1154  new->thread = NULL;
1155  kthread_stop(t);
1156  put_task_struct(t);
1157  }
1158 out_mput:
1159  module_put(desc->owner);
1160  return ret;
1161 }
1162 
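/**
 * setup_irq - set up an interrupt
 * @irq: interrupt line to set up
 * @act: irqaction for the interrupt
 *
 * Used to statically set up interrupts in the early boot process.
 */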
1170 int setup_irq(unsigned int irq, struct irqaction *act)
1171 {
1172  int retval;
1173  struct irq_desc *desc = irq_to_desc(irq);
1174 
1175  if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1176  return -EINVAL;
1177  chip_bus_lock(desc);
1178  retval = __setup_irq(irq, desc, act);
1179  chip_bus_sync_unlock(desc);
1180 
1181  return retval;
1182 }
1184 
1185 /*
1186  * Internal function to unregister an irqaction - used to free
1187  * regular and special interrupts that are part of the architecture.
1188  */
1189 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1190 {
1191  struct irq_desc *desc = irq_to_desc(irq);
1192  struct irqaction *action, **action_ptr;
1193  unsigned long flags;
1194 
1195  WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1196 
1197  if (!desc)
1198  return NULL;
1199 
1200  raw_spin_lock_irqsave(&desc->lock, flags);
1201 
1202  /*
1203  * There can be multiple actions per IRQ descriptor, find the right
1204  * one based on the dev_id:
1205  */
1206  action_ptr = &desc->action;
1207  for (;;) {
1208  action = *action_ptr;
1209 
1210  if (!action) {
1211  WARN(1, "Trying to free already-free IRQ %d\n", irq);
1212  raw_spin_unlock_irqrestore(&desc->lock, flags);
1213 
1214  return NULL;
1215  }
1216 
1217  if (action->dev_id == dev_id)
1218  break;
1219  action_ptr = &action->next;
1220  }
1221 
1222  /* Found it - now remove it from the list of entries: */
1223  *action_ptr = action->next;
1224 
1225  /* If this was the last handler, shut down the IRQ line: */
1226  if (!desc->action)
1227  irq_shutdown(desc);
1228 
1229 #ifdef CONFIG_SMP
1230  /* make sure affinity_hint is cleaned up */
1231  if (WARN_ON_ONCE(desc->affinity_hint))
1232  desc->affinity_hint = NULL;
1233 #endif
1234 
1235  raw_spin_unlock_irqrestore(&desc->lock, flags);
1236 
1237  unregister_handler_proc(irq, action);
1238 
1239  /* Make sure it's not being used on another CPU: */
1240  synchronize_irq(irq);
1241 
1242 #ifdef CONFIG_DEBUG_SHIRQ
1243  /*
1244  * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1245  * event to happen even now it's being freed, so let's make sure that
1246  * is so by doing an extra call to the handler ....
1247  *
1248  * ( We do this after actually deregistering it, to make sure that a
1249  *   'real' IRQ doesn't run in parallel with our fake. )
1250  */
1251  if (action->flags & IRQF_SHARED) {
1252  local_irq_save(flags);
1253  action->handler(irq, dev_id);
1254  local_irq_restore(flags);
1255  }
1256 #endif
1257 
1258  if (action->thread) {
1259  kthread_stop(action->thread);
1260  put_task_struct(action->thread);
1261  }
1262 
1263  module_put(desc->owner);
1264  return action;
1265 }
1266 
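/**
 * remove_irq - free an interrupt
 * @irq: interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically set up by the early boot process.
 */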
1274 void remove_irq(unsigned int irq, struct irqaction *act)
1275 {
1276  struct irq_desc *desc = irq_to_desc(irq);
1277 
1278  if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1279  __free_irq(irq, act->dev_id);
1280 }
1282 
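/**
 * free_irq - free an interrupt allocated with request_irq()
 * @irq: interrupt line to free
 * @dev_id: device identity to free
 *
 * Removes an interrupt handler. The handler is removed and, if the
 * interrupt line is no longer in use by any driver, the line is shut
 * down. On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. Does not return
 * until any executing handlers for this IRQ have completed.
 *
 * Must not be called from interrupt context.
 */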
1297 void free_irq(unsigned int irq, void *dev_id)
1298 {
1299  struct irq_desc *desc = irq_to_desc(irq);
1300 
1301  if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1302  return;
1303 
1304 #ifdef CONFIG_SMP
1305  if (WARN_ON(desc->affinity_notify))
1306  desc->affinity_notify = NULL;
1307 #endif
1308 
1309  chip_bus_lock(desc);
1310  kfree(__free_irq(irq, dev_id));
1311  chip_bus_sync_unlock(desc);
1312 }
1314 
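/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: interrupt line to allocate
 * @handler: primary handler, called in hard interrupt context. If NULL
 *           and @thread_fn != NULL, a default primary handler that just
 *           returns IRQ_WAKE_THREAD is installed.
 * @thread_fn: function called from the irq handler thread. If NULL, no
 *             irq thread is created.
 * @irqflags: interrupt type flags (IRQF_*)
 * @devname: an ASCII name for the claiming device
 * @dev_id: a cookie passed back to the handler; must be unique and
 *          non-NULL for shared interrupts (usually the address of the
 *          device data structure).
 *
 * Allocates interrupt resources and enables the interrupt line and IRQ
 * handling. From the point this call is made the handler may be invoked,
 * so the hardware must be initialised before the handler is installed.
 *
 * For a threaded handler, @handler runs in hard interrupt context, checks
 * whether the interrupt originates from the device, quiesces it on the
 * device and returns IRQ_WAKE_THREAD to wake @thread_fn. This split is
 * what makes threaded handlers work on shared interrupt lines.
 *
 * Illustrative driver usage (sketch only; the foo_* names are
 * hypothetical, not part of this file):
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_is_mine(foo))	// shared line: not ours
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);	// quiesce the level source
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn()
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_handle_event(foo);		// may sleep here
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 *
 * Returns 0 on success or a negative errno.
 */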
1357 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1358  irq_handler_t thread_fn, unsigned long irqflags,
1359  const char *devname, void *dev_id)
1360 {
1361  struct irqaction *action;
1362  struct irq_desc *desc;
1363  int retval;
1364 
1365  /*
1366  * Sanity-check: shared interrupts must pass in a real dev-ID,
1367  * otherwise we'll have trouble later trying to figure out
1368  * which interrupt is which (messes up the interrupt freeing
1369  * logic etc).
1370  */
1371  if ((irqflags & IRQF_SHARED) && !dev_id)
1372  return -EINVAL;
1373 
1374  desc = irq_to_desc(irq);
1375  if (!desc)
1376  return -EINVAL;
1377 
1378  if (!irq_settings_can_request(desc) ||
1379  WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1380  return -EINVAL;
1381 
1382  if (!handler) {
1383  if (!thread_fn)
1384  return -EINVAL;
1385  handler = irq_default_primary_handler;
1386  }
1387 
1388  action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1389  if (!action)
1390  return -ENOMEM;
1391 
1392  action->handler = handler;
1393  action->thread_fn = thread_fn;
1394  action->flags = irqflags;
1395  action->name = devname;
1396  action->dev_id = dev_id;
1397 
1398  chip_bus_lock(desc);
1399  retval = __setup_irq(irq, desc, action);
1400  chip_bus_sync_unlock(desc);
1401 
1402  if (retval)
1403  kfree(action);
1404 
1405 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1406  if (!retval && (irqflags & IRQF_SHARED)) {
1407  /*
1408  * It's a shared IRQ -- the driver ought to be prepared for it
1409  * to happen immediately, so let's make sure....
1410  * We disable the irq to make sure that a 'real' IRQ doesn't
1411  * run in parallel with our fake.
1412  */
1413  unsigned long flags;
1414 
1415  disable_irq(irq);
1416  local_irq_save(flags);
1417 
1418  handler(irq, dev_id);
1419 
1420  local_irq_restore(flags);
1421  enable_irq(irq);
1422  }
1423 #endif
1424  return retval;
1425 }
1427 
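/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: interrupt line to allocate
 * @handler: function called when the IRQ occurs; runs either in hard
 *           interrupt context or, for nested irqs, in a thread
 * @flags: interrupt type flags
 * @name: an ASCII name for the claiming device
 * @dev_id: a cookie passed back to the handler
 *
 * Like request_irq(), but selects hardirq or threaded handling depending
 * on the underlying irq descriptor. Returns a negative errno on failure,
 * or IRQC_IS_HARDIRQ / IRQC_IS_NESTED on success.
 */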
1445 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1446  unsigned long flags, const char *name, void *dev_id)
1447 {
1448  struct irq_desc *desc = irq_to_desc(irq);
1449  int ret;
1450 
1451  if (!desc)
1452  return -EINVAL;
1453 
1454  if (irq_settings_is_nested_thread(desc)) {
1455  ret = request_threaded_irq(irq, NULL, handler,
1456  flags, name, dev_id);
1457  return !ret ? IRQC_IS_NESTED : ret;
1458  }
1459 
1460  ret = request_irq(irq, handler, flags, name, dev_id);
1461  return !ret ? IRQC_IS_HARDIRQ : ret;
1462 }
1464 
1465 void enable_percpu_irq(unsigned int irq, unsigned int type)
1466 {
1467  unsigned int cpu = smp_processor_id();
1468  unsigned long flags;
1469  struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1470 
1471  if (!desc)
1472  return;
1473 
1474  type &= IRQ_TYPE_SENSE_MASK;
1475  if (type != IRQ_TYPE_NONE) {
1476  int ret;
1477 
1478  ret = __irq_set_trigger(desc, irq, type);
1479 
1480  if (ret) {
1481  WARN(1, "failed to set type for IRQ%d\n", irq);
1482  goto out;
1483  }
1484  }
1485 
1486  irq_percpu_enable(desc, cpu);
1487 out:
1488  irq_put_desc_unlock(desc, flags);
1489 }
1490 
1491 void disable_percpu_irq(unsigned int irq)
1492 {
1493  unsigned int cpu = smp_processor_id();
1494  unsigned long flags;
1495  struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1496 
1497  if (!desc)
1498  return;
1499 
1500  irq_percpu_disable(desc, cpu);
1501  irq_put_desc_unlock(desc, flags);
1502 }
1503 
1504 /*
1505  * Internal function to unregister a percpu irqaction.
1506  */
1507 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1508 {
1509  struct irq_desc *desc = irq_to_desc(irq);
1510  struct irqaction *action;
1511  unsigned long flags;
1512 
1513  WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1514 
1515  if (!desc)
1516  return NULL;
1517 
1518  raw_spin_lock_irqsave(&desc->lock, flags);
1519 
1520  action = desc->action;
1521  if (!action || action->percpu_dev_id != dev_id) {
1522  WARN(1, "Trying to free already-free IRQ %d\n", irq);
1523  goto bad;
1524  }
1525 
1526  if (!cpumask_empty(desc->percpu_enabled)) {
1527  WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1528  irq, cpumask_first(desc->percpu_enabled));
1529  goto bad;
1530  }
1531 
1532  /* Found it - now remove it from the list of entries: */
1533  desc->action = NULL;
1534 
1535  raw_spin_unlock_irqrestore(&desc->lock, flags);
1536 
1537  unregister_handler_proc(irq, action);
1538 
1539  module_put(desc->owner);
1540  return action;
1541 
1542 bad:
1543  raw_spin_unlock_irqrestore(&desc->lock, flags);
1544  return NULL;
1545 }
1546 
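/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove per-cpu interrupts statically set up by the early boot
 * process.
 */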
1554 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1555 {
1556  struct irq_desc *desc = irq_to_desc(irq);
1557 
1558  if (desc && irq_settings_is_per_cpu_devid(desc))
1559  __free_percpu_irq(irq, act->percpu_dev_id);
1560 }
1561 
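/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq()
 * @irq: interrupt line to free
 * @dev_id: the percpu device identity to free
 *
 * Removes a per-cpu interrupt handler. The caller must have disabled the
 * interrupt on all CPUs it was enabled on before calling this function.
 *
 * Must not be called from interrupt context.
 */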
1574 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1575 {
1576  struct irq_desc *desc = irq_to_desc(irq);
1577 
1578  if (!desc || !irq_settings_is_per_cpu_devid(desc))
1579  return;
1580 
1581  chip_bus_lock(desc);
1582  kfree(__free_percpu_irq(irq, dev_id));
1583  chip_bus_sync_unlock(desc);
1584 }
1585 
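/**
 * setup_percpu_irq - set up a per-cpu interrupt
 * @irq: interrupt line to set up
 * @act: irqaction for the interrupt
 *
 * Used to statically set up per-cpu interrupts in the early boot process.
 */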
1593 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1594 {
1595  struct irq_desc *desc = irq_to_desc(irq);
1596  int retval;
1597 
1598  if (!desc || !irq_settings_is_per_cpu_devid(desc))
1599  return -EINVAL;
1600  chip_bus_lock(desc);
1601  retval = __setup_irq(irq, desc, act);
1602  chip_bus_sync_unlock(desc);
1603 
1604  return retval;
1605 }
1606 
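/**
 * request_percpu_irq - allocate a per-cpu interrupt line
 * @irq: interrupt line to allocate
 * @handler: function called when the IRQ occurs
 * @devname: an ASCII name for the claiming device
 * @dev_id: a percpu cookie passed back to the handler; must be non-NULL
 *
 * Allocates a per-cpu interrupt line. Such interrupts are not balanced
 * across CPUs and must be enabled on each CPU that should receive them
 * via enable_percpu_irq().
 */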
1622 int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1623  const char *devname, void __percpu *dev_id)
1624 {
1625  struct irqaction *action;
1626  struct irq_desc *desc;
1627  int retval;
1628 
1629  if (!dev_id)
1630  return -EINVAL;
1631 
1632  desc = irq_to_desc(irq);
1633  if (!desc || !irq_settings_can_request(desc) ||
1634  !irq_settings_is_per_cpu_devid(desc))
1635  return -EINVAL;
1636 
1637  action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1638  if (!action)
1639  return -ENOMEM;
1640 
1641  action->handler = handler;
1642  action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1643  action->name = devname;
1644  action->percpu_dev_id = dev_id;
1645 
1646  chip_bus_lock(desc);
1647  retval = __setup_irq(irq, desc, action);
1648  chip_bus_sync_unlock(desc);
1649 
1650  if (retval)
1651  kfree(action);
1652 
1653  return retval;
1654 }