net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

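#if 0
/* Illustrative sketch, not part of the original file: another module can
 * observe device state changes through the notifier chain exported above.
 * The example_* names are hypothetical; the callback signature is the
 * standard one from <linux/notifier.h>, and the event/ptr pair matches
 * what hci_notify() passes to atomic_notifier_call_chain(). */
static int example_hci_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct hci_dev *hdev = ptr;

	if (event == HCI_DEV_UP)
		BT_DBG("%s came up", hdev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_hci_event,
};

/* Call hci_register_notifier(&example_nb) in module init and
 * hci_unregister_notifier(&example_nb) in module exit. */
#endif
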
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

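#if 0
/* Illustrative sketch, not part of the original file: a caller passes a
 * request callback that queues one or more HCI commands, then blocks in
 * hci_request() until hci_req_complete() fires from the event path or the
 * timeout expires. SCAN_PAGE/SCAN_INQUIRY and HCI_INIT_TIMEOUT come from
 * hci.h; hci_scan_req is the request callback defined later in this file. */
static int example_enable_scans(struct hci_dev *hdev)
{
	return hci_request(hdev, hci_scan_req,
				SCAN_PAGE | SCAN_INQUIRY,
				msecs_to_jiffies(HCI_INIT_TIMEOUT));
}
#endif
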
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

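#if 0
/* Illustrative sketch, not part of the original file: hci_inquiry() is
 * reached from user space through the HCIINQUIRY ioctl on a raw HCI
 * socket. The buffer layout below (a struct hci_inquiry_req followed by
 * num_rsp inquiry_info records) mirrors the copy_to_user() calls above;
 * the LAP bytes encode the GIAC (0x9e8b33) and length is in 1.28 s units. */
struct {
	struct hci_inquiry_req ir;
	struct inquiry_info info[8];
} req = {
	.ir = {
		.dev_id  = 0,
		.flags   = IREQ_CACHE_FLUSH,
		.lap     = { 0x33, 0x8b, 0x9e },	/* GIAC */
		.length  = 8,				/* ~10.24 s */
		.num_rsp = 8,
	},
};

/* From user space: ioctl(hci_sock_fd, HCIINQUIRY, &req); */
#endif
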
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);

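#if 0
/* Illustrative sketch, not part of the original file: a transport driver
 * allocates a device, fills in its callbacks and registers it. The
 * example_* names are hypothetical; real drivers such as hci_usb and
 * hci_uart follow this pattern. open/close/send/destruct use the
 * callback signatures from struct hci_dev in hci_core.h. */
static int example_open(struct hci_dev *hdev);
static int example_close(struct hci_dev *hdev);
static int example_send(struct sk_buff *skb);
static void example_destruct(struct hci_dev *hdev);

static int example_probe(void)
{
	struct hci_dev *hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->type     = HCI_USB;
	hdev->open     = example_open;
	hdev->close    = example_close;
	hdev->send     = example_send;
	hdev->destruct = example_destruct;
	hdev->owner    = THIS_MODULE;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return -ENODEV;
	}
	return 0;
}
#endif
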
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	hci_unregister_sysfs(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);

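#if 0
/* Illustrative sketch, not part of the original file: a byte-stream
 * transport (e.g. a UART line discipline) that already knows the current
 * packet type can push arbitrarily sized chunks into the reassembly
 * machinery above; complete frames are handed to hci_recv_frame()
 * automatically. The function name is hypothetical. */
static void example_receive_bytes(struct hci_dev *hdev, int pkt_type,
					void *buf, int len)
{
	if (hci_recv_fragment(hdev, pkt_type, buf, len) < 0)
		BT_ERR("%s frame reassembly failed", hdev->name);
}
#endif
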
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}

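#if 0
/* Illustrative sketch, not part of the original file: commands are built
 * from the hci_cp_* parameter structures in hci.h and queued through
 * hci_send_cmd(); delivery is asynchronous via the cmd task below, which
 * honours the controller's command-credit count. The function name is
 * hypothetical; 0x02 is page scan repetition mode R2. */
static void example_remote_name_req(struct hci_dev *hdev, bdaddr_t *dst)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, dst);
	cp.pscan_rep_mode = 0x02;

	hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
#endif
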
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

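/*
 * Worked example (illustrative, not in the original file): with
 * hdev->acl_cnt == 8 free controller buffers and three ACL connections
 * holding queued data, the connection with the fewest in-flight packets
 * (smallest c->sent) is picked and given a quote of 8 / 3 = 2 packets,
 * so repeated calls round-robin the available credits across busy
 * connections; a single busy connection would receive all 8.
 */
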
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}