/*
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

/*
 * This job is sent to the kevent workqueue.
 * As long as no event has been sent to any callback, the connector
 * workqueue is not created, so that we avoid a uselessly idle kernel task.
 * Once the first event is received, we create this dedicated workqueue:
 * the flow of data can be high and we don't want to encumber keventd
 * with it.
 */
static void cn_queue_create(struct work_struct *work)
{
        struct cn_queue_dev *dev;

        dev = container_of(work, struct cn_queue_dev, wq_creation);

        dev->cn_queue = create_singlethread_workqueue(dev->name);
        /* If we fail, we will use keventd for all following connector jobs */
        WARN_ON(!dev->cn_queue);
}
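
/*
 * Note: a creation failure here is not fatal.  dev->cn_queue simply stays
 * NULL and queue_cn_work() keeps falling back to keventd via
 * schedule_work() for every following job.
 */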

/*
 * Queue data sent to a callback.
 * If the connector workqueue has already been created, we queue the job
 * on it.  Otherwise, we queue the job to kevent and also queue the
 * creation of the connector workqueue.
 */
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
{
        struct cn_queue_dev *pdev = cbq->pdev;

        if (likely(pdev->cn_queue))
                return queue_work(pdev->cn_queue, work);

        /* Don't create the connector workqueue twice */
        if (atomic_inc_return(&pdev->wq_requested) == 1)
                schedule_work(&pdev->wq_creation);
        else
                atomic_dec(&pdev->wq_requested);

        return schedule_work(work);
}
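
/*
 * Rough usage sketch (illustrative, not part of this file): the message
 * delivery path in connector.c is expected to fill in the callback data
 * of a matching entry and then hand its work item to queue_cn_work():
 *
 *      cbq->data.callback_priv = msg;
 *      cbq->data.ddata = data;
 *      cbq->data.destruct_data = destruct_data;
 *      if (queue_cn_work(cbq, &cbq->work))
 *              err = 0;
 *
 * The field names match struct cn_callback_data as used by
 * cn_queue_wrapper() below; the surrounding caller code is assumed.
 */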

void cn_queue_wrapper(struct work_struct *work)
{
        struct cn_callback_entry *cbq =
                container_of(work, struct cn_callback_entry, work);
        struct cn_callback_data *d = &cbq->data;

        d->callback(d->callback_priv);

        d->destruct_data(d->ddata);
}

static struct cn_callback_entry *
cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
                              void (*callback)(struct cn_msg *))
{
        struct cn_callback_entry *cbq;

        cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
        if (!cbq) {
                printk(KERN_ERR "Failed to create new callback queue.\n");
                return NULL;
        }

        snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
        memcpy(&cbq->id.id, id, sizeof(struct cb_id));
        cbq->data.callback = callback;

        INIT_WORK(&cbq->work, &cn_queue_wrapper);

        return cbq;
}

static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
        /* The first jobs have been sent to kevent, flush them too */
        flush_scheduled_work();
        if (cbq->pdev->cn_queue)
                flush_workqueue(cbq->pdev->cn_queue);

        kfree(cbq);
}

int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
        return ((i1->idx == i2->idx) && (i1->val == i2->val));
}
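
/*
 * Illustrative example (ids chosen arbitrarily): two identifiers are equal
 * only when both fields match.
 *
 *      struct cb_id a = { .idx = 0xabcd, .val = 0x1 };
 *      struct cb_id b = { .idx = 0xabcd, .val = 0x1 };
 *      cn_cb_equal(&a, &b);    // non-zero
 *      b.val = 0x2;
 *      cn_cb_equal(&a, &b);    // zero
 */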

int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id,
                          void (*callback)(struct cn_msg *))
{
        struct cn_callback_entry *cbq, *__cbq;
        int found = 0;

        cbq = cn_queue_alloc_callback_entry(name, id, callback);
        if (!cbq)
                return -ENOMEM;

        atomic_inc(&dev->refcnt);
        cbq->pdev = dev;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&__cbq->id.id, id)) {
                        found = 1;
                        break;
                }
        }
        if (!found)
                list_add_tail(&cbq->callback_entry, &dev->queue_list);
        spin_unlock_bh(&dev->queue_lock);

        if (found) {
                /* An entry with this id already exists, drop the new one */
                cn_queue_free_callback(cbq);
                atomic_dec(&dev->refcnt);
                return -EINVAL;
        }

        cbq->group = cbq->id.id.idx;

        return 0;
}

void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
        struct cn_callback_entry *cbq, *n;
        int found = 0;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&cbq->id.id, id)) {
                        list_del(&cbq->callback_entry);
                        found = 1;
                        break;
                }
        }
        spin_unlock_bh(&dev->queue_lock);

        if (found) {
                cn_queue_free_callback(cbq);
                atomic_dec(&dev->refcnt);
        }
}

struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
{
        struct cn_queue_dev *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return NULL;

        snprintf(dev->name, sizeof(dev->name), "%s", name);
        atomic_set(&dev->refcnt, 0);
        INIT_LIST_HEAD(&dev->queue_list);
        spin_lock_init(&dev->queue_lock);
        init_waitqueue_head(&dev->wq_created);

        dev->nls = nls;

        INIT_WORK(&dev->wq_creation, cn_queue_create);

        return dev;
}

void cn_queue_free_dev(struct cn_queue_dev *dev)
{
        struct cn_callback_entry *cbq, *n;
        long timeout;
        DEFINE_WAIT(wait);

        /* Flush the first pending jobs queued on kevent */
        flush_scheduled_work();

        /* If the connector workqueue creation is still pending, wait for it */
        prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
        if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
                timeout = schedule_timeout(HZ * 2);
                if (!timeout && !dev->cn_queue)
                        WARN_ON(1);
        }
        finish_wait(&dev->wq_created, &wait);

        if (dev->cn_queue) {
                flush_workqueue(dev->cn_queue);
                destroy_workqueue(dev->cn_queue);
        }

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
                list_del(&cbq->callback_entry);
        spin_unlock_bh(&dev->queue_lock);

        while (atomic_read(&dev->refcnt)) {
                printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
                       dev->name, atomic_read(&dev->refcnt));
                msleep(1000);
        }

        kfree(dev);
}
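
/*
 * Minimal usage sketch (assumed caller, not part of this file): modules
 * normally reach this code through the higher-level API in connector.c
 * rather than by calling cn_queue_*() directly, roughly:
 *
 *      static struct cb_id my_id = { .idx = 0xabcd, .val = 0x1 };
 *
 *      static void my_callback(struct cn_msg *msg)
 *      {
 *              // runs from cn_queue_wrapper(), either on keventd or on
 *              // the dedicated connector workqueue once it exists
 *      }
 *
 *      err = cn_add_callback(&my_id, "my-connector", my_callback);
 *      ...
 *      cn_del_callback(&my_id);
 *
 * cn_add_callback() and cn_del_callback() end up in
 * cn_queue_add_callback() and cn_queue_del_callback() above.
 */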