/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>

#include "rfkill.h"
#define POLL_INTERVAL	(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)
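/*
 * Editorial sketch (not part of the original file): the low bits combine
 * independently, so a radio can be hard- and soft-blocked at the same
 * time; clearing only the software bit still leaves it blocked through
 * RFKILL_BLOCK_ANY.
 */
#if 0
static void example_state_bits(void)
{
	unsigned long state = RFKILL_BLOCK_HW | RFKILL_BLOCK_SW;

	state &= ~RFKILL_BLOCK_SW;		/* soft-unblock */
	WARN_ON(!(state & RFKILL_BLOCK_ANY));	/* still hard-blocked */
}
#endif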
struct rfkill {
	spinlock_t		lock;

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			suspended;
	bool			persistent;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)
struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};
MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");
/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");
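/*
 * Usage note (editorial addition): with rfkill built into the kernel the
 * parameter can be given on the command line, e.g. "rfkill.default_state=0"
 * to start all radio types soft-blocked; built as a module, the equivalent
 * is "modprobe rfkill default_state=0".
 */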
static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;
#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);
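/*
 * Editorial sketch (not part of the original file): a driver that wants a
 * stable LED trigger name instead of the dev_name() default sets it before
 * registration, so rfkill_led_trigger_register() below picks it up. The
 * "example-wifi-led" name is hypothetical.
 */
#if 0
static void example_name_led_trigger(struct rfkill *rfkill)
{
	/* must be called before rfkill_register() */
	rfkill_set_led_trigger_name(rfkill, "example-wifi-led");
}
#endif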
static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */
static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}
static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered || rfkill->suspended)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}
static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = rfkill->state & RFKILL_BLOCK_ANY;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	return any;
}
/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	int err;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (rfkill->state & RFKILL_BLOCK_SW)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	/* don't call the driver's set_block method while suspended */
	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _prev, this may be different
		 * from what we set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_event(rfkill);
}
#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is claimed by userspace (in which case,
 * that switch is left alone) or suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}
/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}
/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices into the soft-blocked
 * state, ignoring everything in its path but rfkill_global_mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}
/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global states saved in
 * rfkill_global_states. This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}
/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}
/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}
/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif
bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
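/*
 * Editorial sketch (not part of the original file): a typical driver poll
 * callback reads a hardware switch and feeds it back via
 * rfkill_set_hw_state(), whose return value is the combined (hw or sw)
 * block state. example_read_hw_switch() and example_disable_radio() are
 * hypothetical driver helpers.
 */
#if 0
static void example_poll(struct rfkill *rfkill, void *data)
{
	bool hw_blocked = example_read_hw_switch(data);

	/* the core recomputes the combined state and sends events */
	if (rfkill_set_hw_state(rfkill, hw_blocked))
		example_disable_radio(data);
}
#endif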
static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in a ops->set_block right now, use other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}
bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (prev != blocked && !hwblock)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
	}

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	/* apply the hardware state too, not just the software state */
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
	}
}
EXPORT_SYMBOL(rfkill_set_states);
static ssize_t rfkill_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}
static const char *rfkill_get_type_str(enum rfkill_type type)
{
	switch (type) {
	case RFKILL_TYPE_WLAN:
		return "wlan";
	case RFKILL_TYPE_BLUETOOTH:
		return "bluetooth";
	case RFKILL_TYPE_UWB:
		return "ultrawideband";
	case RFKILL_TYPE_WIMAX:
		return "wimax";
	case RFKILL_TYPE_WWAN:
		return "wwan";
	default:
		BUG();
	}

	BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1);
}
static ssize_t rfkill_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}
static ssize_t rfkill_idx_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}
static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}
static ssize_t rfkill_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return sprintf(buf, "%d\n", user_state_from_blocked(state));
}
static ssize_t rfkill_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	/*
	 * The intention was that userspace can only take control over
	 * a given device when/if rfkill-input doesn't control it due
	 * to user_claim. Since user_claim is currently unsupported,
	 * we never support changing the state from userspace -- this
	 * can be implemented again later.
	 */

	return -EPERM;
}

static ssize_t rfkill_claim_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sprintf(buf, "%d\n", 0);
}
static ssize_t rfkill_claim_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	return -EOPNOTSUPP;
}
static struct device_attribute rfkill_dev_attrs[] = {
	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
	__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
	__ATTR_NULL
};
static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}
static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_get_type_str(rfkill->type));
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}
void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);
static int rfkill_suspend(struct device *dev, pm_message_t state)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill_pause_polling(rfkill);

	rfkill->suspended = true;

	return 0;
}

static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	cur = !!(rfkill->state & RFKILL_BLOCK_SW);
	rfkill_set_block(rfkill, cur);

	rfkill->suspended = false;

	rfkill_resume_polling(rfkill);

	return 0;
}
static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_attrs	= rfkill_dev_attrs,
	.dev_uevent	= rfkill_dev_uevent,
	.suspend	= rfkill_suspend,
	.resume		= rfkill_resume,
};
bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);
struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	rfkill->name = name;
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
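/*
 * Editorial sketch (not part of the original file): the intended
 * allocation/registration lifecycle from a driver's probe/remove paths.
 * All "example_*" identifiers are hypothetical.
 */
#if 0
static int example_set_block(void *data, bool blocked)
{
	/* program the radio enable line here */
	return 0;
}

static const struct rfkill_ops example_ops = {
	.set_block = example_set_block,
};

static int example_probe(struct device *parent, struct rfkill **out)
{
	struct rfkill *rfkill;
	int err;

	rfkill = rfkill_alloc("example", parent, RFKILL_TYPE_WLAN,
			      &example_ops, NULL);
	if (!rfkill)
		return -ENOMEM;

	err = rfkill_register(rfkill);
	if (err) {
		rfkill_destroy(rfkill);
		return err;
	}

	*out = rfkill;
	return 0;
}

static void example_remove(struct rfkill *rfkill)
{
	rfkill_unregister(rfkill);
	rfkill_destroy(rfkill);
}
#endif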
static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	schedule_delayed_work(&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}
static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}
static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}
int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	int error;

	BUG_ON(!rfkill);

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		schedule_delayed_work(&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);
void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);
void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);
static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */
	list_add(&data->list, &rfkill_fds);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return nonseekable_open(inode, file);

 free:
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}
static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	unsigned int res = POLLOUT | POLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = POLLIN | POLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}
static bool rfkill_readable(struct rfkill_data *data)
{
	bool r;

	mutex_lock(&data->mtx);
	r = !list_empty(&data->events);
	mutex_unlock(&data->mtx);

	return r;
}
static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		ret = wait_event_interruptible(data->read_wait,
					       rfkill_readable(data));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
				list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}
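/*
 * Editorial sketch (not part of the original file): a minimal userspace
 * consumer of the read interface above -- it blocks until an event
 * arrives and prints it. This would be compiled separately against the
 * exported <linux/rfkill.h>, not in the kernel.
 */
#if 0
int example_monitor(void)
{
	struct rfkill_event ev;
	int fd = open("/dev/rfkill", O_RDONLY);

	if (fd < 0)
		return -1;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("idx %u type %u op %u soft %u hard %u\n",
		       ev.idx, ev.type, ev.op, ev.soft, ev.hard);

	close(fd);
	return 0;
}
#endif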
static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;

	/* we don't need the 'hard' variable but accept it */
	if (count < sizeof(ev) - 1)
		return -EINVAL;

	if (copy_from_user(&ev, buf, sizeof(ev) - 1))
		return -EFAULT;

	if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
		return -EINVAL;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	if (ev.op == RFKILL_OP_CHANGE_ALL) {
		if (ev.type == RFKILL_TYPE_ALL) {
			enum rfkill_type i;
			for (i = 0; i < NUM_RFKILL_TYPES; i++)
				rfkill_global_states[i].cur = ev.soft;
		} else {
			rfkill_global_states[ev.type].cur = ev.soft;
		}
	}

	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
			continue;

		if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, ev.soft);
	}
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
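/*
 * Editorial sketch (not part of the original file): soft-blocking all
 * WLAN radios from userspace through the write interface above; again
 * a userspace program, shown only for illustration.
 */
#if 0
int example_block_wlan(void)
{
	struct rfkill_event ev = {
		.op	= RFKILL_OP_CHANGE_ALL,
		.type	= RFKILL_TYPE_WLAN,
		.soft	= 1,
	};
	int fd = open("/dev/rfkill", O_RDWR);

	if (fd < 0)
		return -1;

	if (write(fd, &ev, sizeof(ev)) < 0) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}
#endif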
static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}
#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		if (atomic_inc_return(&rfkill_input_disabled) == 1)
			printk(KERN_DEBUG "rfkill: input handler disabled\n");
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
#endif
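/*
 * Editorial sketch (not part of the original file): a userspace rfkill
 * daemon that wants to handle key events itself disables the in-kernel
 * input handler for the lifetime of its fd via the ioctl above.
 */
#if 0
int example_take_over_input(int fd)
{
	/* RFKILL_IOCTL_NOINPUT comes from the exported <linux/rfkill.h>;
	 * the handler is re-enabled when the fd is released */
	return ioctl(fd, RFKILL_IOCTL_NOINPUT);
}
#endif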
static const struct file_operations rfkill_fops = {
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= rfkill_fop_ioctl,
#endif
};

static struct miscdevice rfkill_miscdev = {
	.name	= "rfkill",
	.fops	= &rfkill_fops,
	.minor	= MISC_DYNAMIC_MINOR,
};
static int __init rfkill_init(void)
{
	int error;
	int i;

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = !rfkill_default_state;

	error = class_register(&rfkill_class);
	if (error)
		goto out;

	error = misc_register(&rfkill_miscdev);
	if (error) {
		class_unregister(&rfkill_class);
		goto out;
	}

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error) {
		misc_deregister(&rfkill_miscdev);
		class_unregister(&rfkill_class);
		goto out;
	}
#endif

 out:
	return error;
}
subsys_initcall(rfkill_init);
static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);