Linux Kernel 3.7.1 - net/rfkill/core.c

/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)

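/*
 * Editor's illustration (not part of the original file): the blocked
 * state is a bitmask, so hard and soft block can be set at the same
 * time, and clearing one of them does not unblock the device:
 *
 *	unsigned long state = RFKILL_BLOCK_HW | RFKILL_BLOCK_SW;
 *
 *	state &= ~RFKILL_BLOCK_SW;
 *	// (state & RFKILL_BLOCK_ANY) is still non-zero: the HW bit remains
 */
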
struct rfkill {
	spinlock_t		lock;

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			persistent;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};

MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");

/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;

#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
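
/*
 * Editor's note: the rfkill_event filled in above is the fixed-size
 * record userspace reads from /dev/rfkill.  For reference, its layout
 * in <linux/rfkill.h> (kernel 3.7 era) is:
 *
 *	struct rfkill_event {
 *		__u32 idx;
 *		__u8  type;
 *		__u8  op;
 *		__u8  soft, hard;
 *	} __attribute__((packed));
 */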

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = !!(rfkill->state & RFKILL_BLOCK_ANY);
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	return any;
}

static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, curr;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = rfkill->state & RFKILL_BLOCK_SW;

	if (rfkill->state & RFKILL_BLOCK_SW)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _PREV, which may be different
		 * from what we set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	curr = rfkill->state & RFKILL_BLOCK_SW;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	if (prev != curr)
		rfkill_event(rfkill);
}
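
/*
 * Editor's note, a sketch of the race the SETCALL/PREV bits cover:
 *
 *	rfkill_set_block()                 driver context
 *	------------------                 --------------
 *	state |= RFKILL_BLOCK_SW_SETCALL
 *	ops->set_block() sleeps
 *	                                   rfkill_set_sw_state(rfkill, b)
 *	                                     sees SETCALL and records b
 *	                                     in RFKILL_BLOCK_SW_PREV
 *	set_block() returns an error
 *	SW bit restored from _PREV, i.e.
 *	from the driver's latest report
 *
 * So if the set_block op fails, a status report that raced with it is
 * not lost; if the op succeeds, the requested state simply stands.
 */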

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type && type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif


bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);

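/*
 * Editor's sketch (hypothetical driver code, not part of this file):
 * a driver would typically call rfkill_set_hw_state() from whatever
 * handler notices the hardware kill switch changing, e.g.
 *
 *	static irqreturn_t wifi_killswitch_irq(int irq, void *ctx)
 *	{
 *		struct mydev *m = ctx;	// hypothetical driver state
 *
 *		rfkill_set_hw_state(m->rfkill,
 *				    gpio_get_value(m->kill_gpio));
 *		return IRQ_HANDLED;
 *	}
 */
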
static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in a ops->set_block right now, use other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
	}
}
EXPORT_SYMBOL(rfkill_set_states);

static ssize_t rfkill_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}

static const char *rfkill_get_type_str(enum rfkill_type type)
{
	BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);

	switch (type) {
	case RFKILL_TYPE_WLAN:
		return "wlan";
	case RFKILL_TYPE_BLUETOOTH:
		return "bluetooth";
	case RFKILL_TYPE_UWB:
		return "ultrawideband";
	case RFKILL_TYPE_WIMAX:
		return "wimax";
	case RFKILL_TYPE_WWAN:
		return "wwan";
	case RFKILL_TYPE_GPS:
		return "gps";
	case RFKILL_TYPE_FM:
		return "fm";
	default:
		BUG();
	}
}

static ssize_t rfkill_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}

static ssize_t rfkill_idx_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}

static ssize_t rfkill_persistent_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->persistent);
}

static ssize_t rfkill_hard_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}

static ssize_t rfkill_soft_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t rfkill_soft_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state > 1)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state);
	mutex_unlock(&rfkill_global_mutex);

	return err ?: count;
}

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t rfkill_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t rfkill_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
	    state != RFKILL_USER_STATE_UNBLOCKED)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
	mutex_unlock(&rfkill_global_mutex);

	return err ?: count;
}

static ssize_t rfkill_claim_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t rfkill_claim_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	return -EOPNOTSUPP;
}

static struct device_attribute rfkill_dev_attrs[] = {
	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
	__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
	__ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
	__ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
	__ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
	__ATTR_NULL
};

static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_get_type_str(rfkill->type));
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);

static int rfkill_suspend(struct device *dev, pm_message_t state)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill_pause_polling(rfkill);

	return 0;
}

static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	rfkill_resume_polling(rfkill);

	return 0;
}

static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_attrs	= rfkill_dev_attrs,
	.dev_uevent	= rfkill_dev_uevent,
	.suspend	= rfkill_suspend,
	.resume		= rfkill_resume,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);

struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	rfkill->name = name;
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);

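/*
 * Editor's sketch of the full driver-side lifecycle (hypothetical
 * names, assuming a probe/remove pair and a mydev_radio_power helper):
 *
 *	static int mydev_set_block(void *data, bool blocked)
 *	{
 *		return mydev_radio_power(data, !blocked);
 *	}
 *
 *	static const struct rfkill_ops mydev_rfkill_ops = {
 *		.set_block = mydev_set_block,
 *	};
 *
 *	// in probe:
 *	m->rfkill = rfkill_alloc("mydev-wlan", &pdev->dev,
 *				 RFKILL_TYPE_WLAN, &mydev_rfkill_ops, m);
 *	if (!m->rfkill)
 *		return -ENOMEM;
 *	err = rfkill_register(m->rfkill);
 *	if (err) {
 *		rfkill_destroy(m->rfkill);
 *		return err;
 *	}
 *
 *	// in remove:
 *	rfkill_unregister(m->rfkill);
 *	rfkill_destroy(m->rfkill);
 */
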
static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	schedule_delayed_work(&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}
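
/*
 * Editor's sketch: a device without kill-switch interrupts supplies a
 * poll op (hypothetical names below) that simply reports fresh hardware
 * state; the work is rescheduled above every POLL_INTERVAL:
 *
 *	static void mydev_rfkill_poll(struct rfkill *rfkill, void *data)
 *	{
 *		struct mydev *m = data;
 *
 *		rfkill_set_hw_state(rfkill, mydev_read_killswitch(m));
 *	}
 */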

static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	int error;

	BUG_ON(!rfkill);

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		schedule_delayed_work(&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */

	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	list_add(&data->list, &rfkill_fds);
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return nonseekable_open(inode, file);

 free:
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}

static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	unsigned int res = POLLOUT | POLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = POLLIN | POLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}

static bool rfkill_readable(struct rfkill_data *data)
{
	bool r;

	mutex_lock(&data->mtx);
	r = !list_empty(&data->events);
	mutex_unlock(&data->mtx);

	return r;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		ret = wait_event_interruptible(data->read_wait,
					       rfkill_readable(data));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
				list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;

	/* we don't need the 'hard' variable but accept it */
	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

	/*
	 * Copy as much data as we can accept into our 'ev' buffer,
	 * but tell userspace how much we've copied so it can determine
	 * our API version even in a write() call, if it cares.
	 */
	count = min(count, sizeof(ev));
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
		return -EINVAL;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	if (ev.op == RFKILL_OP_CHANGE_ALL) {
		if (ev.type == RFKILL_TYPE_ALL) {
			enum rfkill_type i;
			for (i = 0; i < NUM_RFKILL_TYPES; i++)
				rfkill_global_states[i].cur = ev.soft;
		} else {
			rfkill_global_states[ev.type].cur = ev.soft;
		}
	}

	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
			continue;

		if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, ev.soft);
	}
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
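
/*
 * Editor's sketch of a matching userspace client (standalone C, not
 * part of the kernel): soft-block every WLAN device via /dev/rfkill.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <linux/rfkill.h>
 *
 *	int block_wlan(void)
 *	{
 *		struct rfkill_event ev = {
 *			.op   = RFKILL_OP_CHANGE_ALL,
 *			.type = RFKILL_TYPE_WLAN,
 *			.soft = 1,
 *		};
 *		int fd = open("/dev/rfkill", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, &ev, sizeof(ev)) != sizeof(ev)) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */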

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		if (atomic_inc_return(&rfkill_input_disabled) == 1)
			printk(KERN_DEBUG "rfkill: input handler disabled\n");
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
#endif

static const struct file_operations rfkill_fops = {
	.owner		= THIS_MODULE,
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= rfkill_fop_ioctl,
#endif
	.llseek		= no_llseek,
};

static struct miscdevice rfkill_miscdev = {
	.name	= "rfkill",
	.fops	= &rfkill_fops,
	.minor	= MISC_DYNAMIC_MINOR,
};

static int __init rfkill_init(void)
{
	int error;
	int i;

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = !rfkill_default_state;

	error = class_register(&rfkill_class);
	if (error)
		goto out;

	error = misc_register(&rfkill_miscdev);
	if (error) {
		class_unregister(&rfkill_class);
		goto out;
	}

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error) {
		misc_deregister(&rfkill_miscdev);
		class_unregister(&rfkill_class);
		goto out;
	}
#endif

 out:
	return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);