/*
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

/*
 * This job is sent to the kevent workqueue.
 * As long as no event has been sent to any callback, the connector workqueue
 * is not created, to avoid a useless idle kernel task.
 * Once the first event is received, we create this dedicated workqueue: it
 * is necessary because the flow of data can be high and we don't want
 * to encumber keventd with that.
 */
static void cn_queue_create(struct work_struct *work)
{
        struct cn_queue_dev *dev;

        dev = container_of(work, struct cn_queue_dev, wq_creation);

        dev->cn_queue = create_singlethread_workqueue(dev->name);
        /* If we fail, we will use keventd for all following connector jobs */
        WARN_ON(!dev->cn_queue);
}

/*
 * Queue data sent to a callback.
 * If the connector workqueue is already created, we queue the job on it.
 * Otherwise, we queue the job to kevent and also queue the connector
 * workqueue creation.
 */
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
{
        struct cn_queue_dev *pdev = cbq->pdev;

        if (likely(pdev->cn_queue))
                return queue_work(pdev->cn_queue, work);

        /* Don't create the connector workqueue twice */
        if (atomic_inc_return(&pdev->wq_requested) == 1)
                schedule_work(&pdev->wq_creation);
        else
                atomic_dec(&pdev->wq_requested);

        return schedule_work(work);
}

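/*
 * Work handler that runs either on keventd or on the dedicated connector
 * workqueue: extract the cn_msg and netlink parameters from the queued
 * skb and hand them to the registered callback.
 */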
void cn_queue_wrapper(struct work_struct *work)
{
        struct cn_callback_entry *cbq =
                container_of(work, struct cn_callback_entry, work);
        struct cn_callback_data *d = &cbq->data;
        struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
        struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);

        d->callback(msg, nsp);

        /* The queued skb belongs to us once the callback has run */
        kfree_skb(d->skb);
        d->skb = NULL;
}

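/*
 * Allocate and initialize a callback entry: copy the textual name and the
 * cb_id, record the callback and set up the work item that will run it
 * through cn_queue_wrapper().
 */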
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
                              void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
        struct cn_callback_entry *cbq;

        cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
        if (!cbq) {
                printk(KERN_ERR "Failed to create new callback queue.\n");
                return NULL;
        }

        snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
        memcpy(&cbq->id.id, id, sizeof(struct cb_id));
        cbq->data.callback = callback;

        INIT_WORK(&cbq->work, &cn_queue_wrapper);
        return cbq;
}

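/*
 * Flush any work still pending for this entry, both on keventd and on the
 * connector workqueue if it has been created, then free the entry.
 */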
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
        /* The first jobs have been sent to kevent, flush them too */
        flush_scheduled_work();
        if (cbq->pdev->cn_queue)
                flush_workqueue(cbq->pdev->cn_queue);

        kfree(cbq);
}

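/* Two connector ids match when both their idx and val are equal. */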
int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
        return ((i1->idx == i2->idx) && (i1->val == i2->val));
}

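/*
 * Register a new callback on @dev. Returns -EINVAL if an entry with the
 * same id already exists on the queue list, which is walked and modified
 * under queue_lock.
 */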
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id,
                          void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
        struct cn_callback_entry *cbq, *__cbq;
        int found = 0;

        cbq = cn_queue_alloc_callback_entry(name, id, callback);
        if (!cbq)
                return -ENOMEM;

        atomic_inc(&dev->refcnt);
        cbq->pdev = dev;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&__cbq->id.id, id)) {
                        found = 1;
                        break;
                }
        }
        if (!found)
                list_add_tail(&cbq->callback_entry, &dev->queue_list);
        spin_unlock_bh(&dev->queue_lock);

        if (found) {
                cn_queue_free_callback(cbq);
                atomic_dec(&dev->refcnt);
                return -EINVAL;
        }

        cbq->group = cbq->id.id.idx;

        return 0;
}

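/*
 * Unregister the callback matching @id: unlink it under queue_lock, then
 * flush its pending work, free the entry and drop the device reference.
 */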
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
        struct cn_callback_entry *cbq, *n;
        int found = 0;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&cbq->id.id, id)) {
                        list_del(&cbq->callback_entry);
                        found = 1;
                        break;
                }
        }
        spin_unlock_bh(&dev->queue_lock);

        if (found) {
                cn_queue_free_callback(cbq);
                atomic_dec(&dev->refcnt);
        }
}

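/*
 * Allocate and initialize a connector queue device. The dedicated
 * workqueue is not created here: cn_queue_create() is only scheduled
 * once the first event arrives (see queue_cn_work()).
 */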
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
{
        struct cn_queue_dev *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return NULL;

        snprintf(dev->name, sizeof(dev->name), "%s", name);
        atomic_set(&dev->refcnt, 0);
        INIT_LIST_HEAD(&dev->queue_list);
        spin_lock_init(&dev->queue_lock);
        init_waitqueue_head(&dev->wq_created);

        dev->nls = nls;

        INIT_WORK(&dev->wq_creation, cn_queue_create);

        return dev;
}

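/*
 * Tear down a queue device: wait for a possibly pending workqueue creation,
 * flush and destroy the workqueue, drop all remaining callback entries and
 * then wait for the reference count to reach zero.
 */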
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
        struct cn_callback_entry *cbq, *n;
        long timeout;
        DEFINE_WAIT(wait);

        /* Flush the first pending jobs queued on kevent */
        flush_scheduled_work();

        /* If the connector workqueue creation is still pending, wait for it */
        prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
        if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
                timeout = schedule_timeout(HZ * 2);
                if (!timeout && !dev->cn_queue)
                        WARN_ON(1);
        }
        finish_wait(&dev->wq_created, &wait);

        if (dev->cn_queue) {
                flush_workqueue(dev->cn_queue);
                destroy_workqueue(dev->cn_queue);
        }

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
                list_del(&cbq->callback_entry);
        spin_unlock_bh(&dev->queue_lock);

        while (atomic_read(&dev->refcnt)) {
                printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
                       dev->name, atomic_read(&dev->refcnt));