10 #define pr_fmt(fmt) "genirq: " fmt
14 #include <linux/module.h>
15 #include <linux/random.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
23 #ifdef CONFIG_IRQ_FORCED_THREADING
26 static int __init setup_forced_irqthreads(
char *
arg)
59 while (irqd_irq_inprogress(&desc->
irq_data))
64 inprogress = irqd_irq_inprogress(&desc->
irq_data);
86 int irq_can_set_affinity(
unsigned int irq)
90 if (!desc || !irqd_can_balance(&desc->
irq_data) ||
113 action = action->
next;
117 #ifdef CONFIG_GENERIC_PENDING_IRQ
/*
 * irq_can_move_pcntxt - may the irq's affinity be changed from process context?
 * @data: irq data of the interrupt in question
 *
 * With CONFIG_GENERIC_PENDING_IRQ, affinity changes normally have to be
 * deferred until the next interrupt (hard-irq context) unless the irqchip
 * explicitly allows moving the interrupt from process context.
 *
 * Returns true when the deferred-move machinery can be bypassed.
 */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
/*
 * irq_move_pending - is a deferred affinity change queued for this irq?
 * @data: irq data of the interrupt in question
 *
 * Returns true when a setaffinity request was recorded but not yet applied
 * (it will be performed from hard-irq context on the next interrupt).
 */
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
129 cpumask_copy(desc->pending_mask, mask);
134 cpumask_copy(mask, desc->pending_mask);
/*
 * !CONFIG_GENERIC_PENDING_IRQ stub: without the deferred-move machinery,
 * affinity can always be changed directly from process context.
 */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return true;
}
/*
 * !CONFIG_GENERIC_PENDING_IRQ stub: no deferred affinity changes exist,
 * so there is never a pending move.
 */
static inline bool irq_move_pending(struct irq_data *data)
{
	return false;
}
166 struct irq_chip *chip = irq_data_get_irq_chip(data);
173 if (irq_can_move_pcntxt(data)) {
176 irqd_set_move_pending(data);
177 irq_copy_pending(desc, mask);
180 if (desc->affinity_notify) {
181 kref_get(&desc->affinity_notify->kref);
195 int irq_set_affinity(
unsigned int irq,
const struct cpumask *mask)
210 int irq_set_affinity_hint(
unsigned int irq,
const struct cpumask *
m)
217 desc->affinity_hint =
m;
218 irq_put_desc_unlock(desc, flags);
225 struct irq_affinity_notify *
notify =
231 if (!desc || !alloc_cpumask_var(&cpumask,
GFP_KERNEL))
235 if (irq_move_pending(&desc->
irq_data))
236 irq_get_pending(cpumask, desc);
238 cpumask_copy(cpumask, desc->
irq_data.affinity);
241 notify->notify(notify, cpumask);
243 free_cpumask_var(cpumask);
245 kref_put(¬ify->kref, notify->release);
260 irq_set_affinity_notifier(
unsigned int irq,
struct irq_affinity_notify *notify)
263 struct irq_affinity_notify *old_notify;
275 kref_init(¬ify->kref);
276 INIT_WORK(¬ify->work, irq_affinity_notify);
280 old_notify = desc->affinity_notify;
281 desc->affinity_notify = notify;
285 kref_put(&old_notify->kref, old_notify->release);
291 #ifndef CONFIG_AUTO_IRQ_AFFINITY
296 setup_affinity(
unsigned int irq,
struct irq_desc *desc,
struct cpumask *mask)
298 struct cpumask *
set = irq_default_affinity;
302 if (!irq_can_set_affinity(irq))
310 if (cpumask_intersects(desc->
irq_data.affinity,
317 cpumask_and(mask, cpu_online_mask,
set);
322 if (cpumask_intersects(mask, nodemask))
323 cpumask_and(mask, mask, nodemask);
330 setup_affinity(
unsigned int irq,
struct irq_desc *
d,
struct cpumask *mask)
332 return irq_select_affinity(irq);
346 ret = setup_affinity(irq, desc, mask);
353 setup_affinity(
unsigned int irq,
struct irq_desc *desc,
struct cpumask *mask)
371 static int __disable_irq_nosync(
unsigned int irq)
379 irq_put_desc_busunlock(desc, flags);
396 __disable_irq_nosync(irq);
414 if (!__disable_irq_nosync(irq))
433 switch (desc->
depth) {
442 irq_settings_set_noprobe(desc);
471 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
476 irq_put_desc_busunlock(desc, flags);
480 static int set_irq_wake_real(
unsigned int irq,
unsigned int on)
488 if (desc->
irq_data.chip->irq_set_wake)
520 ret = set_irq_wake_real(irq, on);
528 WARN(1,
"Unbalanced IRQ %d wake disable\n", irq);
530 ret = set_irq_wake_real(irq, on);
537 irq_put_desc_busunlock(desc, flags);
550 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
556 if (irq_settings_can_request(desc)) {
561 irq_put_desc_unlock(desc, flags);
576 pr_debug(
"No set_type function for IRQ %d (%s)\n", irq,
577 chip ? (chip->
name ? :
"unknown") :
"unknown");
584 if (!irqd_irq_masked(&desc->
irq_data))
586 if (!irqd_irq_disabled(&desc->
irq_data))
599 flags = irqd_get_trigger_type(&desc->
irq_data);
600 irq_settings_set_trigger_mask(desc, flags);
602 irq_settings_clr_level(desc);
604 irq_settings_set_level(desc);
611 pr_err(
"Setting trigger mode %lu for irq %u failed (%pF)\n",
635 WARN(1,
"Primary handler called for nested irq %d\n", irq);
639 static int irq_wait_for_interrupt(
struct irqaction *action)
662 static void irq_finalize_oneshot(
struct irq_desc *desc,
687 chip_bus_sync_unlock(desc);
708 chip_bus_sync_unlock(desc);
733 cpumask_copy(mask, desc->
irq_data.affinity);
736 set_cpus_allowed_ptr(
current, mask);
737 free_cpumask_var(mask);
757 irq_finalize_oneshot(desc, action);
773 irq_finalize_oneshot(desc, action);
777 static void wake_threads_waitq(
struct irq_desc *desc)
795 pr_err(
"exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
805 wake_threads_waitq(desc);
808 irq_finalize_oneshot(desc, action);
814 static int irq_thread(
void *data)
827 handler_fn = irq_forced_thread_fn;
829 handler_fn = irq_thread_fn;
833 init_task_work(&on_exit_work, irq_thread_dtor);
836 while (!irq_wait_for_interrupt(action)) {
839 irq_thread_check_affinity(desc, action);
841 action_ret = handler_fn(desc, action);
845 wake_threads_waitq(desc);
861 static void irq_setup_forced_threading(
struct irqaction *
new)
870 if (!new->thread_fn) {
872 new->thread_fn =
new->handler;
873 new->handler = irq_default_primary_handler;
894 if (!try_module_get(desc->
owner))
901 nested = irq_settings_is_nested_thread(desc);
903 if (!new->thread_fn) {
912 new->handler = irq_nested_primary_handler;
914 if (irq_settings_can_thread(desc))
915 irq_setup_forced_threading(
new);
923 if (new->thread_fn && !nested) {
990 old_ptr = &old->
next;
1006 if (thread_mask == ~0
UL) {
1030 new->thread_mask = 1 <<
ffz(thread_mask);
1032 }
else if (new->handler == irq_default_primary_handler &&
1049 pr_err(
"Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1073 irq_settings_set_per_cpu(desc);
1079 if (irq_settings_can_autoenable(desc))
1087 irq_settings_set_no_balancing(desc);
1092 setup_affinity(irq, desc, mask);
1096 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1100 pr_warning(
"irq %d uses trigger mode %u; requested %u\n",
1132 free_cpumask_var(mask);
1138 pr_err(
"Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1139 irq, new->flags, new->name, old->
flags, old->
name);
1140 #ifdef CONFIG_DEBUG_SHIRQ
1148 free_cpumask_var(mask);
1159 module_put(desc->
owner);
1175 if (
WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1177 chip_bus_lock(desc);
1178 retval = __setup_irq(irq, desc, act);
1179 chip_bus_sync_unlock(desc);
1193 unsigned long flags;
1206 action_ptr = &desc->
action;
1208 action = *action_ptr;
1211 WARN(1,
"Trying to free already-free IRQ %d\n", irq);
1217 if (action->
dev_id == dev_id)
1219 action_ptr = &action->
next;
1223 *action_ptr = action->
next;
1232 desc->affinity_hint =
NULL;
1242 #ifdef CONFIG_DEBUG_SHIRQ
1260 put_task_struct(action->
thread);
1263 module_put(desc->
owner);
1278 if (desc && !
WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1279 __free_irq(irq, act->
dev_id);
1301 if (!desc ||
WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1305 if (
WARN_ON(desc->affinity_notify))
1306 desc->affinity_notify =
NULL;
1309 chip_bus_lock(desc);
1310 kfree(__free_irq(irq, dev_id));
1311 chip_bus_sync_unlock(desc);
1359 const char *devname,
void *dev_id)
1378 if (!irq_settings_can_request(desc) ||
1379 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1385 handler = irq_default_primary_handler;
1394 action->
flags = irqflags;
1395 action->
name = devname;
1398 chip_bus_lock(desc);
1399 retval = __setup_irq(irq, desc, action);
1400 chip_bus_sync_unlock(desc);
1405 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1406 if (!retval && (irqflags & IRQF_SHARED)) {
1413 unsigned long flags;
1418 handler(irq, dev_id);
1446 unsigned long flags,
const char *
name,
void *dev_id)
1454 if (irq_settings_is_nested_thread(desc)) {
1456 flags, name, dev_id);
1460 ret =
request_irq(irq, handler, flags, name, dev_id);
1468 unsigned long flags;
1481 WARN(1,
"failed to set type for IRQ%d\n", irq);
1488 irq_put_desc_unlock(desc, flags);
1494 unsigned long flags;
1501 irq_put_desc_unlock(desc, flags);
1507 static struct irqaction *__free_percpu_irq(
unsigned int irq,
void __percpu *dev_id)
1511 unsigned long flags;
1522 WARN(1,
"Trying to free already-free IRQ %d\n", irq);
1527 WARN(1,
"percpu IRQ %d still enabled on CPU%d!\n",
1539 module_put(desc->
owner);
1558 if (desc && irq_settings_is_per_cpu_devid(desc))
1578 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1581 chip_bus_lock(desc);
1582 kfree(__free_percpu_irq(irq, dev_id));
1583 chip_bus_sync_unlock(desc);
1598 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1600 chip_bus_lock(desc);
1601 retval = __setup_irq(irq, desc, act);
1602 chip_bus_sync_unlock(desc);
1623 const char *devname,
void __percpu *dev_id)
1633 if (!desc || !irq_settings_can_request(desc) ||
1634 !irq_settings_is_per_cpu_devid(desc))
1643 action->
name = devname;
1646 chip_bus_lock(desc);
1647 retval = __setup_irq(irq, desc, action);
1648 chip_bus_sync_unlock(desc);