/*
 * rfkill: rate-limit rfkill-input workqueue usage (v3)
 * net/rfkill/rfkill-input.c
 * (source blob 6ddf3ceb23031bd7dae8ea2033dde0296744e75c)
 */
/*
 * Input layer to RF Kill interface connector
 *
 * Copyright (c) 2007 Dmitry Torokhov
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
13 #include <linux/module.h>
14 #include <linux/input.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/init.h>
18 #include <linux/rfkill.h>
20 #include "rfkill-input.h"
22 MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
23 MODULE_DESCRIPTION("Input layer to RF switch connector");
24 MODULE_LICENSE("GPL");
/* Policy applied when the SW_RFKILL_ALL master switch turns ON */
enum rfkill_input_master_mode {
	RFKILL_INPUT_MASTER_DONOTHING = 0,	/* only unlock EPO */
	RFKILL_INPUT_MASTER_RESTORE = 1,	/* restore pre-EPO states */
	RFKILL_INPUT_MASTER_UNBLOCKALL = 2,	/* unblock every switch type */
	RFKILL_INPUT_MASTER_MAX,		/* marker */
};
33 /* Delay (in ms) between consecutive switch ops */
34 #define RFKILL_OPS_DELAY 200
36 static enum rfkill_input_master_mode rfkill_master_switch_mode =
37 RFKILL_INPUT_MASTER_UNBLOCKALL;
38 module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
39 MODULE_PARM_DESC(master_switch_mode,
40 "SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all");
/* Global operations that can be scheduled on the rfkill task */
enum rfkill_global_sched_op {
	RFKILL_GLOBAL_OP_EPO = 0,	/* Emergency Power Off all switches */
	RFKILL_GLOBAL_OP_RESTORE,	/* restore pre-EPO switch states */
	RFKILL_GLOBAL_OP_UNLOCK,	/* remove EPO lock only */
	RFKILL_GLOBAL_OP_UNBLOCK,	/* remove EPO lock and unblock all */
};
/*
 * Currently, the code marked with RFKILL_NEED_SWSET is inactive.
 * If handling of EV_SW SW_WLAN/WWAN/BLUETOOTH/etc is needed in the
 * future, when such events are added, that code will be necessary.
 */
55 struct rfkill_task {
56 struct delayed_work dwork;
58 /* ensures that task is serialized */
59 struct mutex mutex;
61 /* protects everything below */
62 spinlock_t lock;
64 /* pending regular switch operations (1=pending) */
65 unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
67 #ifdef RFKILL_NEED_SWSET
68 /* set operation pending (1=pending) */
69 unsigned long sw_setpending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
71 /* desired state for pending set operation (1=unblock) */
72 unsigned long sw_newstate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
73 #endif
75 /* should the state be complemented (1=yes) */
76 unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
78 bool global_op_pending;
79 enum rfkill_global_sched_op op;
81 /* last time it was scheduled */
82 unsigned long last_scheduled;
85 static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
87 unsigned int i;
89 switch (op) {
90 case RFKILL_GLOBAL_OP_EPO:
91 rfkill_epo();
92 break;
93 case RFKILL_GLOBAL_OP_RESTORE:
94 rfkill_restore_states();
95 break;
96 case RFKILL_GLOBAL_OP_UNLOCK:
97 rfkill_remove_epo_lock();
98 break;
99 case RFKILL_GLOBAL_OP_UNBLOCK:
100 rfkill_remove_epo_lock();
101 for (i = 0; i < RFKILL_TYPE_MAX; i++)
102 rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED);
103 break;
104 default:
105 /* memory corruption or bug, fail safely */
106 rfkill_epo();
107 WARN(1, "Unknown requested operation %d! "
108 "rfkill Emergency Power Off activated\n",
109 op);
113 #ifdef RFKILL_NEED_SWSET
114 static void __rfkill_handle_normal_op(const enum rfkill_type type,
115 const bool sp, const bool s, const bool c)
117 enum rfkill_state state;
119 if (sp)
120 state = (s) ? RFKILL_STATE_UNBLOCKED :
121 RFKILL_STATE_SOFT_BLOCKED;
122 else
123 state = rfkill_get_global_state(type);
125 if (c)
126 state = rfkill_state_complement(state);
128 rfkill_switch_all(type, state);
130 #else
131 static void __rfkill_handle_normal_op(const enum rfkill_type type,
132 const bool c)
134 enum rfkill_state state;
136 state = rfkill_get_global_state(type);
137 if (c)
138 state = rfkill_state_complement(state);
140 rfkill_switch_all(type, state);
142 #endif
144 static void rfkill_task_handler(struct work_struct *work)
146 struct rfkill_task *task = container_of(work,
147 struct rfkill_task, dwork.work);
148 bool doit = true;
150 mutex_lock(&task->mutex);
152 spin_lock_irq(&task->lock);
153 while (doit) {
154 if (task->global_op_pending) {
155 enum rfkill_global_sched_op op = task->op;
156 task->global_op_pending = false;
157 memset(task->sw_pending, 0, sizeof(task->sw_pending));
158 spin_unlock_irq(&task->lock);
160 __rfkill_handle_global_op(op);
162 /* make sure we do at least one pass with
163 * !task->global_op_pending */
164 spin_lock_irq(&task->lock);
165 continue;
166 } else if (!rfkill_is_epo_lock_active()) {
167 unsigned int i = 0;
169 while (!task->global_op_pending &&
170 i < RFKILL_TYPE_MAX) {
171 if (test_and_clear_bit(i, task->sw_pending)) {
172 bool c;
173 #ifdef RFKILL_NEED_SWSET
174 bool sp, s;
175 sp = test_and_clear_bit(i,
176 task->sw_setpending);
177 s = test_bit(i, task->sw_newstate);
178 #endif
179 c = test_and_clear_bit(i,
180 task->sw_togglestate);
181 spin_unlock_irq(&task->lock);
183 #ifdef RFKILL_NEED_SWSET
184 __rfkill_handle_normal_op(i, sp, s, c);
185 #else
186 __rfkill_handle_normal_op(i, c);
187 #endif
189 spin_lock_irq(&task->lock);
191 i++;
194 doit = task->global_op_pending;
196 spin_unlock_irq(&task->lock);
198 mutex_unlock(&task->mutex);
201 static struct rfkill_task rfkill_task = {
202 .dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork,
203 rfkill_task_handler),
204 .mutex = __MUTEX_INITIALIZER(rfkill_task.mutex),
205 .lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock),
208 static unsigned long rfkill_ratelimit(const unsigned long last)
210 const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
211 return (time_after(jiffies, last + delay)) ? 0 : delay;
214 static void rfkill_schedule_ratelimited(void)
216 if (!delayed_work_pending(&rfkill_task.dwork)) {
217 schedule_delayed_work(&rfkill_task.dwork,
218 rfkill_ratelimit(rfkill_task.last_scheduled));
219 rfkill_task.last_scheduled = jiffies;
223 static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
225 unsigned long flags;
227 spin_lock_irqsave(&rfkill_task.lock, flags);
228 rfkill_task.op = op;
229 rfkill_task.global_op_pending = true;
230 if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
231 /* bypass the limiter for EPO */
232 cancel_delayed_work(&rfkill_task.dwork);
233 schedule_delayed_work(&rfkill_task.dwork, 0);
234 rfkill_task.last_scheduled = jiffies;
235 } else
236 rfkill_schedule_ratelimited();
237 spin_unlock_irqrestore(&rfkill_task.lock, flags);
#ifdef RFKILL_NEED_SWSET
/* Use this if you need to add EV_SW SW_WLAN/WWAN/BLUETOOTH/etc handling */

/*
 * Queue a "set to @desired_state" operation for @type.  Ignored while
 * the EPO lock is active or a global op is pending (the global op
 * supersedes per-type operations).
 */
static void rfkill_schedule_set(enum rfkill_type type,
				enum rfkill_state desired_state)
{
	unsigned long flags;

	if (rfkill_is_epo_lock_active())
		return;

	spin_lock_irqsave(&rfkill_task.lock, flags);
	if (!rfkill_task.global_op_pending) {
		set_bit(type, rfkill_task.sw_pending);
		set_bit(type, rfkill_task.sw_setpending);
		/* a set replaces any queued toggle */
		clear_bit(type, rfkill_task.sw_togglestate);
		if (desired_state)
			set_bit(type, rfkill_task.sw_newstate);
		else
			clear_bit(type, rfkill_task.sw_newstate);
		rfkill_schedule_ratelimited();
	}
	spin_unlock_irqrestore(&rfkill_task.lock, flags);
}
#endif
266 static void rfkill_schedule_toggle(enum rfkill_type type)
268 unsigned long flags;
270 if (rfkill_is_epo_lock_active())
271 return;
273 spin_lock_irqsave(&rfkill_task.lock, flags);
274 if (!rfkill_task.global_op_pending) {
275 set_bit(type, rfkill_task.sw_pending);
276 change_bit(type, rfkill_task.sw_togglestate);
277 rfkill_schedule_ratelimited();
279 spin_unlock_irqrestore(&rfkill_task.lock, flags);
282 static void rfkill_schedule_evsw_rfkillall(int state)
284 if (state) {
285 switch (rfkill_master_switch_mode) {
286 case RFKILL_INPUT_MASTER_UNBLOCKALL:
287 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK);
288 break;
289 case RFKILL_INPUT_MASTER_RESTORE:
290 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE);
291 break;
292 case RFKILL_INPUT_MASTER_DONOTHING:
293 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK);
294 break;
295 default:
296 /* memory corruption or driver bug! fail safely */
297 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
298 WARN(1, "Unknown rfkill_master_switch_mode (%d), "
299 "driver bug or memory corruption detected!\n",
300 rfkill_master_switch_mode);
301 break;
303 } else
304 rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
307 static void rfkill_event(struct input_handle *handle, unsigned int type,
308 unsigned int code, int data)
310 if (type == EV_KEY && data == 1) {
311 enum rfkill_type t;
313 switch (code) {
314 case KEY_WLAN:
315 t = RFKILL_TYPE_WLAN;
316 break;
317 case KEY_BLUETOOTH:
318 t = RFKILL_TYPE_BLUETOOTH;
319 break;
320 case KEY_UWB:
321 t = RFKILL_TYPE_UWB;
322 break;
323 case KEY_WIMAX:
324 t = RFKILL_TYPE_WIMAX;
325 break;
326 default:
327 return;
329 rfkill_schedule_toggle(t);
330 return;
331 } else if (type == EV_SW) {
332 switch (code) {
333 case SW_RFKILL_ALL:
334 rfkill_schedule_evsw_rfkillall(data);
335 return;
336 default:
337 return;
342 static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
343 const struct input_device_id *id)
345 struct input_handle *handle;
346 int error;
348 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
349 if (!handle)
350 return -ENOMEM;
352 handle->dev = dev;
353 handle->handler = handler;
354 handle->name = "rfkill";
356 /* causes rfkill_start() to be called */
357 error = input_register_handle(handle);
358 if (error)
359 goto err_free_handle;
361 error = input_open_device(handle);
362 if (error)
363 goto err_unregister_handle;
365 return 0;
367 err_unregister_handle:
368 input_unregister_handle(handle);
369 err_free_handle:
370 kfree(handle);
371 return error;
374 static void rfkill_start(struct input_handle *handle)
376 /* Take event_lock to guard against configuration changes, we
377 * should be able to deal with concurrency with rfkill_event()
378 * just fine (which event_lock will also avoid). */
379 spin_lock_irq(&handle->dev->event_lock);
381 if (test_bit(EV_SW, handle->dev->evbit)) {
382 if (test_bit(SW_RFKILL_ALL, handle->dev->swbit))
383 rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
384 handle->dev->sw));
385 /* add resync for further EV_SW events here */
388 spin_unlock_irq(&handle->dev->event_lock);
/* Input handler disconnect callback: undo rfkill_connect(). */
static void rfkill_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
398 static const struct input_device_id rfkill_ids[] = {
400 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
401 .evbit = { BIT_MASK(EV_KEY) },
402 .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
405 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
406 .evbit = { BIT_MASK(EV_KEY) },
407 .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
410 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
411 .evbit = { BIT_MASK(EV_KEY) },
412 .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
415 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
416 .evbit = { BIT_MASK(EV_KEY) },
417 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
420 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
421 .evbit = { BIT(EV_SW) },
422 .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
427 static struct input_handler rfkill_handler = {
428 .event = rfkill_event,
429 .connect = rfkill_connect,
430 .disconnect = rfkill_disconnect,
431 .start = rfkill_start,
432 .name = "rfkill",
433 .id_table = rfkill_ids,
436 static int __init rfkill_handler_init(void)
438 if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX)
439 return -EINVAL;
442 * The penalty to not doing this is a possible RFKILL_OPS_DELAY delay
443 * at the first use. Acceptable, but if we can avoid it, why not?
445 rfkill_task.last_scheduled =
446 jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
447 return input_register_handler(&rfkill_handler);
450 static void __exit rfkill_handler_exit(void)
452 input_unregister_handler(&rfkill_handler);
453 cancel_delayed_work_sync(&rfkill_task.dwork);
454 rfkill_remove_epo_lock();
module_init(rfkill_handler_init);
module_exit(rfkill_handler_exit);