/*
 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

/*
 * This job is sent to the kevent workqueue.
 * As long as no event has been sent to any callback, the connector workqueue
 * is not created, to avoid a uselessly waiting kernel task.
 * Once the first event is received, we create this dedicated workqueue, which
 * is necessary because the flow of data can be high and we don't want
 * to encumber keventd with it.
 */
static void cn_queue_create(struct work_struct *work)
{
	struct cn_queue_dev *dev;

	dev = container_of(work, struct cn_queue_dev, wq_creation);

	dev->cn_queue = create_singlethread_workqueue(dev->name);
	/* If we fail, we will use keventd for all following connector jobs */
	WARN_ON(!dev->cn_queue);
}

/*
 * Queue data sent to a callback.
 * If the connector workqueue is already created, we queue the job on it.
 * Otherwise, we queue the job to keventd and also queue the creation of the
 * connector workqueue itself.
 */
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
{
	struct cn_queue_dev *pdev = cbq->pdev;

	if (likely(pdev->cn_queue))
		return queue_work(pdev->cn_queue, work);

	/* Don't create the connector workqueue twice */
	if (atomic_inc_return(&pdev->wq_requested) == 1)
		schedule_work(&pdev->wq_creation);
	else
		atomic_dec(&pdev->wq_requested);

	return schedule_work(work);
}
void cn_queue_wrapper(struct work_struct *work)
{
	struct cn_callback_entry *cbq =
		container_of(work, struct cn_callback_entry, work);
	struct cn_callback_data *d = &cbq->data;

	d->callback(d->callback_priv);

	d->destruct_data(d->ddata);
}
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
			      void (*callback)(void *))
{
	struct cn_callback_entry *cbq;

	cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
	if (!cbq) {
		printk(KERN_ERR "Failed to create new callback queue.\n");
		return NULL;
	}

	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
	cbq->data.callback = callback;

	INIT_WORK(&cbq->work, &cn_queue_wrapper);

	return cbq;
}
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
	/* The first jobs have been sent to keventd, flush them too */
	flush_scheduled_work();
	if (cbq->pdev->cn_queue)
		flush_workqueue(cbq->pdev->cn_queue);

	kfree(cbq);
}
int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
	return ((i1->idx == i2->idx) && (i1->val == i2->val));
}
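
/*
 * Register a callback on the queue device.  The entry is added to the queue
 * list and a device reference is taken, unless a callback with the same id
 * already exists, in which case the new entry is freed and -EINVAL is
 * returned.
 */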
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name,
			  struct cb_id *id, void (*callback)(void *))
{
	struct cn_callback_entry *cbq, *__cbq;
	int found = 0;

	cbq = cn_queue_alloc_callback_entry(name, id, callback);
	if (!cbq)
		return -ENOMEM;

	atomic_inc(&dev->refcnt);
	cbq->pdev = dev;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&__cbq->id.id, id)) {
			found = 1;
			break;
		}
	}
	if (!found)
		list_add_tail(&cbq->callback_entry, &dev->queue_list);
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_free_callback(cbq);
		atomic_dec(&dev->refcnt);
		return -EINVAL;
	}

	cbq->group = cbq->id.id.idx;

	return 0;
}
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
	struct cn_callback_entry *cbq, *n;
	int found = 0;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&cbq->id.id, id)) {
			list_del(&cbq->callback_entry);
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_free_callback(cbq);
		atomic_dec(&dev->refcnt);
	}
}
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
{
	struct cn_queue_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	snprintf(dev->name, sizeof(dev->name), "%s", name);
	atomic_set(&dev->refcnt, 0);
	INIT_LIST_HEAD(&dev->queue_list);
	spin_lock_init(&dev->queue_lock);
	init_waitqueue_head(&dev->wq_created);

	dev->nls = nls;

	INIT_WORK(&dev->wq_creation, cn_queue_create);

	return dev;
}
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
	struct cn_callback_entry *cbq, *n;
	long timeout;
	DEFINE_WAIT(wait);

	/* Flush the first pending jobs queued on kevent */
	flush_scheduled_work();

	/* If the connector workqueue creation is still pending, wait for it */
	prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
	if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
		timeout = schedule_timeout(HZ * 2);
		if (!timeout && !dev->cn_queue)
			WARN_ON(1);
	}
	finish_wait(&dev->wq_created, &wait);

	if (dev->cn_queue) {
		flush_workqueue(dev->cn_queue);
		destroy_workqueue(dev->cn_queue);
	}

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
		list_del(&cbq->callback_entry);
	spin_unlock_bh(&dev->queue_lock);

	while (atomic_read(&dev->refcnt)) {
		printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
		       dev->name, atomic_read(&dev->refcnt));
		msleep(1000);
	}

	kfree(dev);
}