/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
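/*
 * Synchronous request pattern: the caller-supplied req() callback queues one
 * or more HCI commands, and the caller then sleeps on req_wait_q until
 * hci_req_complete() or hci_req_cancel() wakes it up, or the timeout expires.
 */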
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */
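/*
 * The inquiry cache is a singly linked list of inquiry_entry structures on
 * hdev->inq_cache, protected by the device lock. Entries and the cache
 * itself are timestamped so hci_inquiry() can decide when the cached
 * results are stale and a new inquiry is needed.
 */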
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name,
						hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive packet type fragment */
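/*
 * Per-type reassembly: ACL, SCO and event packets (type codes 2, 3 and 4)
 * map onto reassembly[0..2] through the (type - 2) offset used by the
 * __reassembly() macro below, which is why any other type value is rejected
 * with -EILSEQ before the loop starts.
 */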
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
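/*
 * Note: hci_send_cmd() only queues the command on cmd_q; hci_cmd_task()
 * drains that queue and transmits one command at a time, using cmd_cnt as
 * the controller's outstanding-command credit.
 */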
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
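/*
 * hci_low_sent() picks the connection of the requested link type with the
 * fewest packets already in flight and grants it a quota of controller
 * buffers (the available count divided by the number of ready connections,
 * at least 1), so TX bandwidth is shared fairly between connections.
 */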
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
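/*
 * If the controller has not returned command credit for more than a second
 * (cmd_cnt still zero past cmd_last_tx + HZ), the command task below logs a
 * timeout and resets cmd_cnt to 1 so queued commands can make progress again.
 */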
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}