nao-ulib.git: kernel/2.6.29.6-aldebaran-rt/net/rfkill/rfkill-input.c

/*
 * Input layer to RF Kill interface connector
 *
 * Copyright (c) 2007 Dmitry Torokhov
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
#include <linux/sched.h>

#include "rfkill-input.h"

MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
MODULE_DESCRIPTION("Input layer to RF switch connector");
MODULE_LICENSE("GPL");

enum rfkill_input_master_mode {
	RFKILL_INPUT_MASTER_DONOTHING = 0,
	RFKILL_INPUT_MASTER_RESTORE = 1,
	RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
	RFKILL_INPUT_MASTER_MAX,	/* marker */
};

/* Delay (in ms) between consecutive switch ops */
#define RFKILL_OPS_DELAY 200

static enum rfkill_input_master_mode rfkill_master_switch_mode =
					RFKILL_INPUT_MASTER_UNBLOCKALL;
module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
MODULE_PARM_DESC(master_switch_mode,
	"SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all");

enum rfkill_global_sched_op {
	RFKILL_GLOBAL_OP_EPO = 0,
	RFKILL_GLOBAL_OP_RESTORE,
	RFKILL_GLOBAL_OP_UNLOCK,
	RFKILL_GLOBAL_OP_UNBLOCK,
};

/*
 * Currently, the code marked with RFKILL_NEED_SWSET is inactive.
 * If handling of EV_SW SW_WLAN/WWAN/BLUETOOTH/etc is needed in the
 * future, when such events are added, that code will be necessary.
 */

struct rfkill_task {
	struct delayed_work dwork;

	/* ensures that task is serialized */
	struct mutex mutex;

	/* protects everything below */
	spinlock_t lock;

	/* pending regular switch operations (1=pending) */
	unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];

#ifdef RFKILL_NEED_SWSET
	/* set operation pending (1=pending) */
	unsigned long sw_setpending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];

	/* desired state for pending set operation (1=unblock) */
	unsigned long sw_newstate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
#endif

	/* should the state be complemented (1=yes) */
	unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];

	bool global_op_pending;
	enum rfkill_global_sched_op op;

	/* last time it was scheduled */
	unsigned long last_scheduled;
};

static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
{
	unsigned int i;

	switch (op) {
	case RFKILL_GLOBAL_OP_EPO:
		rfkill_epo();
		break;
	case RFKILL_GLOBAL_OP_RESTORE:
		rfkill_restore_states();
		break;
	case RFKILL_GLOBAL_OP_UNLOCK:
		rfkill_remove_epo_lock();
		break;
	case RFKILL_GLOBAL_OP_UNBLOCK:
		rfkill_remove_epo_lock();
		for (i = 0; i < RFKILL_TYPE_MAX; i++)
			rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED);
		break;
	default:
		/* memory corruption or bug, fail safely */
		rfkill_epo();
		WARN(1, "Unknown requested operation %d! "
			"rfkill Emergency Power Off activated\n",
			op);
	}
}

#ifdef RFKILL_NEED_SWSET
static void __rfkill_handle_normal_op(const enum rfkill_type type,
			const bool sp, const bool s, const bool c)
{
	enum rfkill_state state;

	if (sp)
		state = (s) ? RFKILL_STATE_UNBLOCKED :
			      RFKILL_STATE_SOFT_BLOCKED;
	else
		state = rfkill_get_global_state(type);

	if (c)
		state = rfkill_state_complement(state);

	rfkill_switch_all(type, state);
}
#else
static void __rfkill_handle_normal_op(const enum rfkill_type type,
			const bool c)
{
	enum rfkill_state state;

	state = rfkill_get_global_state(type);
	if (c)
		state = rfkill_state_complement(state);

	rfkill_switch_all(type, state);
}
#endif

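/*
 * Work handler: a pending global operation (EPO/restore/unlock/unblock)
 * is executed first and clears all per-type pending bits; otherwise,
 * unless the EPO lock is active, each pending per-type toggle is applied.
 * The spinlock is dropped around the actual rfkill calls, and the outer
 * loop repeats if a new global operation was queued in the meantime.
 */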
static void rfkill_task_handler(struct work_struct *work)
{
	struct rfkill_task *task = container_of(work,
					struct rfkill_task, dwork.work);
	bool doit = true;

	mutex_lock(&task->mutex);

	spin_lock_irq(&task->lock);
	while (doit) {
		if (task->global_op_pending) {
			enum rfkill_global_sched_op op = task->op;
			task->global_op_pending = false;
			memset(task->sw_pending, 0, sizeof(task->sw_pending));
			spin_unlock_irq(&task->lock);

			__rfkill_handle_global_op(op);

			/* make sure we do at least one pass with
			 * !task->global_op_pending */
			spin_lock_irq(&task->lock);
			continue;
		} else if (!rfkill_is_epo_lock_active()) {
			unsigned int i = 0;

			while (!task->global_op_pending &&
						i < RFKILL_TYPE_MAX) {
				if (test_and_clear_bit(i, task->sw_pending)) {
					bool c;
#ifdef RFKILL_NEED_SWSET
					bool sp, s;
					sp = test_and_clear_bit(i,
							task->sw_setpending);
					s = test_bit(i, task->sw_newstate);
#endif
					c = test_and_clear_bit(i,
							task->sw_togglestate);
					spin_unlock_irq(&task->lock);

#ifdef RFKILL_NEED_SWSET
					__rfkill_handle_normal_op(i, sp, s, c);
#else
					__rfkill_handle_normal_op(i, c);
#endif

					spin_lock_irq(&task->lock);
				}
				i++;
			}
		}
		doit = task->global_op_pending;
	}
	spin_unlock_irq(&task->lock);

	mutex_unlock(&task->mutex);
}

static struct rfkill_task rfkill_task = {
	.dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork,
				rfkill_task_handler),
	.mutex = __MUTEX_INITIALIZER(rfkill_task.mutex),
	.lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock),
};

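/*
 * Rate limiting: returns 0 (run the work immediately) if at least
 * RFKILL_OPS_DELAY ms have elapsed since @last, otherwise the full
 * delay in jiffies to wait before the next switch operation.
 */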
static unsigned long rfkill_ratelimit(const unsigned long last)
{
	const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
	return (time_after(jiffies, last + delay)) ? 0 : delay;
}

static void rfkill_schedule_ratelimited(void)
{
	if (!delayed_work_pending(&rfkill_task.dwork)) {
		schedule_delayed_work(&rfkill_task.dwork,
				rfkill_ratelimit(rfkill_task.last_scheduled));
		rfkill_task.last_scheduled = jiffies;
	}
}

static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
{
	unsigned long flags;

	spin_lock_irqsave(&rfkill_task.lock, flags);
	rfkill_task.op = op;
	rfkill_task.global_op_pending = true;
	if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
		/* bypass the limiter for EPO */
		cancel_delayed_work(&rfkill_task.dwork);
		schedule_delayed_work(&rfkill_task.dwork, 0);
		rfkill_task.last_scheduled = jiffies;
	} else
		rfkill_schedule_ratelimited();
	spin_unlock_irqrestore(&rfkill_task.lock, flags);
}

#ifdef RFKILL_NEED_SWSET
/* Use this if you need to add EV_SW SW_WLAN/WWAN/BLUETOOTH/etc handling */

static void rfkill_schedule_set(enum rfkill_type type,
				enum rfkill_state desired_state)
{
	unsigned long flags;

	if (rfkill_is_epo_lock_active())
		return;

	spin_lock_irqsave(&rfkill_task.lock, flags);
	if (!rfkill_task.global_op_pending) {
		set_bit(type, rfkill_task.sw_pending);
		set_bit(type, rfkill_task.sw_setpending);
		clear_bit(type, rfkill_task.sw_togglestate);
		if (desired_state)
			set_bit(type, rfkill_task.sw_newstate);
		else
			clear_bit(type, rfkill_task.sw_newstate);
		rfkill_schedule_ratelimited();
	}
	spin_unlock_irqrestore(&rfkill_task.lock, flags);
}
#endif

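/*
 * Queue a toggle of one rfkill type.  Because change_bit() is used, an
 * even number of toggle requests queued before the work runs cancel out
 * and only the net change is applied.
 */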
static void rfkill_schedule_toggle(enum rfkill_type type)
{
	unsigned long flags;

	if (rfkill_is_epo_lock_active())
		return;

	spin_lock_irqsave(&rfkill_task.lock, flags);
	if (!rfkill_task.global_op_pending) {
		set_bit(type, rfkill_task.sw_pending);
		change_bit(type, rfkill_task.sw_togglestate);
		rfkill_schedule_ratelimited();
	}
	spin_unlock_irqrestore(&rfkill_task.lock, flags);
}

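/*
 * SW_RFKILL_ALL going ON is handled according to master_switch_mode
 * (leave states alone, restore the previous states, or unblock
 * everything); going OFF always triggers the Emergency Power Off.
 */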
static void rfkill_schedule_evsw_rfkillall(int state)
{
	if (state) {
		switch (rfkill_master_switch_mode) {
		case RFKILL_INPUT_MASTER_UNBLOCKALL:
			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK);
			break;
		case RFKILL_INPUT_MASTER_RESTORE:
			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE);
			break;
		case RFKILL_INPUT_MASTER_DONOTHING:
			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK);
			break;
		default:
			/* memory corruption or driver bug! fail safely */
			rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
			WARN(1, "Unknown rfkill_master_switch_mode (%d), "
				"driver bug or memory corruption detected!\n",
				rfkill_master_switch_mode);
			break;
		}
	} else
		rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
}

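/*
 * Input event callback: rfkill hotkeys (on key-down only) queue a
 * per-type toggle, while SW_RFKILL_ALL switch changes go through the
 * master switch handling above.
 */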
static void rfkill_event(struct input_handle *handle, unsigned int type,
			unsigned int code, int data)
{
	if (type == EV_KEY && data == 1) {
		enum rfkill_type t;

		switch (code) {
		case KEY_WLAN:
			t = RFKILL_TYPE_WLAN;
			break;
		case KEY_BLUETOOTH:
			t = RFKILL_TYPE_BLUETOOTH;
			break;
		case KEY_UWB:
			t = RFKILL_TYPE_UWB;
			break;
		case KEY_WIMAX:
			t = RFKILL_TYPE_WIMAX;
			break;
		default:
			return;
		}
		rfkill_schedule_toggle(t);
		return;
	} else if (type == EV_SW) {
		switch (code) {
		case SW_RFKILL_ALL:
			rfkill_schedule_evsw_rfkillall(data);
			return;
		default:
			return;
		}
	}
}

static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
			  const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "rfkill";

	/* causes rfkill_start() to be called */
	error = input_register_handle(handle);
	if (error)
		goto err_free_handle;

	error = input_open_device(handle);
	if (error)
		goto err_unregister_handle;

	return 0;

 err_unregister_handle:
	input_unregister_handle(handle);
 err_free_handle:
	kfree(handle);
	return error;
}

static void rfkill_start(struct input_handle *handle)
{
	/* Take event_lock to guard against configuration changes, we
	 * should be able to deal with concurrency with rfkill_event()
	 * just fine (which event_lock will also avoid). */

	spin_lock_irq(&handle->dev->event_lock);

	if (test_bit(EV_SW, handle->dev->evbit)) {
		if (test_bit(SW_RFKILL_ALL, handle->dev->swbit))
			rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
							handle->dev->sw));
		/* add resync for further EV_SW events here */
	}

	spin_unlock_irq(&handle->dev->event_lock);
}

static void rfkill_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

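/*
 * Only bind to input devices that can emit one of the rfkill hotkeys
 * (WLAN, Bluetooth, UWB, WiMAX) or the SW_RFKILL_ALL master switch.
 */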
static const struct input_device_id rfkill_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
		.evbit = { BIT(EV_SW) },
		.swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
	},
	{ }
};

static struct input_handler rfkill_handler = {
	.event = rfkill_event,
	.connect = rfkill_connect,
	.disconnect = rfkill_disconnect,
	.start = rfkill_start,
	.name = "rfkill",
	.id_table = rfkill_ids,
};

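/*
 * On load, validate the module parameter and pre-age last_scheduled so
 * the very first switch operation is not held back by the rate limiter.
 */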
static int __init rfkill_handler_init(void)
{
	if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX)
		return -EINVAL;

	/*
	 * The penalty to not doing this is a possible RFKILL_OPS_DELAY delay
	 * at the first use.  Acceptable, but if we can avoid it, why not?
	 */
	rfkill_task.last_scheduled =
			jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
	return input_register_handler(&rfkill_handler);
}

static void __exit rfkill_handler_exit(void)
{
	input_unregister_handler(&rfkill_handler);
	cancel_delayed_work_sync(&rfkill_task.dwork);
	rfkill_remove_epo_lock();
}

module_init(rfkill_handler_init);
module_exit(rfkill_handler_exit);