release/src-rt-6.x.4708/linux/linux-2.6.36/net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <net/sock.h>
46 #include <asm/system.h>
47 #include <asm/uaccess.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
53 static void hci_cmd_task(unsigned long arg);
54 static void hci_rx_task(unsigned long arg);
55 static void hci_tx_task(unsigned long arg);
56 static void hci_notify(struct hci_dev *hdev, int event);
58 static DEFINE_RWLOCK(hci_task_lock);
60 /* HCI device list */
61 LIST_HEAD(hci_dev_list);
62 DEFINE_RWLOCK(hci_dev_list_lock);
64 /* HCI callback list */
65 LIST_HEAD(hci_cb_list);
66 DEFINE_RWLOCK(hci_cb_list_lock);
68 /* HCI protocols */
69 #define HCI_MAX_PROTO 2
70 struct hci_proto *hci_proto[HCI_MAX_PROTO];
72 /* HCI notifiers list */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
75 /* ---- HCI notifications ---- */
77 int hci_register_notifier(struct notifier_block *nb)
79 return atomic_notifier_chain_register(&hci_notifier, nb);
82 int hci_unregister_notifier(struct notifier_block *nb)
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 static void hci_notify(struct hci_dev *hdev, int event)
89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
92 /* ---- HCI requests ---- */
94 void hci_req_complete(struct hci_dev *hdev, int result)
96 BT_DBG("%s result 0x%2.2x", hdev->name, result);
98 if (hdev->req_status == HCI_REQ_PEND) {
99 hdev->req_result = result;
100 hdev->req_status = HCI_REQ_DONE;
101 wake_up_interruptible(&hdev->req_wait_q);
105 static void hci_req_cancel(struct hci_dev *hdev, int err)
107 BT_DBG("%s err 0x%2.2x", hdev->name, err);
109 if (hdev->req_status == HCI_REQ_PEND) {
110 hdev->req_result = err;
111 hdev->req_status = HCI_REQ_CANCELED;
112 wake_up_interruptible(&hdev->req_wait_q);
116 /* Execute request and wait for completion. */
117 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
118 unsigned long opt, __u32 timeout)
120 DECLARE_WAITQUEUE(wait, current);
121 int err = 0;
123 BT_DBG("%s start", hdev->name);
125 hdev->req_status = HCI_REQ_PEND;
127 add_wait_queue(&hdev->req_wait_q, &wait);
128 set_current_state(TASK_INTERRUPTIBLE);
130 req(hdev, opt);
131 schedule_timeout(timeout);
133 remove_wait_queue(&hdev->req_wait_q, &wait);
135 if (signal_pending(current))
136 return -EINTR;
138 switch (hdev->req_status) {
139 case HCI_REQ_DONE:
140 err = -bt_err(hdev->req_result);
141 break;
143 case HCI_REQ_CANCELED:
144 err = -hdev->req_result;
145 break;
147 default:
148 err = -ETIMEDOUT;
149 break;
152 hdev->req_status = hdev->req_result = 0;
154 BT_DBG("%s end: err %d", hdev->name, err);
156 return err;
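/* Locked wrapper around __hci_request(): serializes requests and fails with -ENETDOWN if the device is not up. */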
159 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
160 unsigned long opt, __u32 timeout)
162 int ret;
164 if (!test_bit(HCI_UP, &hdev->flags))
165 return -ENETDOWN;
167 /* Serialize all requests */
168 hci_req_lock(hdev);
169 ret = __hci_request(hdev, req, opt, timeout);
170 hci_req_unlock(hdev);
172 return ret;
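/* The request callbacks below queue one or more HCI commands on behalf of __hci_request(). */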
175 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
177 BT_DBG("%s %ld", hdev->name, opt);
179 /* Reset device */
180 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
183 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
185 struct sk_buff *skb;
186 __le16 param;
187 __u8 flt_type;
189 BT_DBG("%s %ld", hdev->name, opt);
191 /* Driver initialization */
193 /* Special commands */
194 while ((skb = skb_dequeue(&hdev->driver_init))) {
195 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
196 skb->dev = (void *) hdev;
198 skb_queue_tail(&hdev->cmd_q, skb);
199 tasklet_schedule(&hdev->cmd_task);
201 skb_queue_purge(&hdev->driver_init);
203 /* Mandatory initialization */
205 /* Reset */
206 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
209 /* Read Local Supported Features */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
212 /* Read Local Version */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
216 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
219 /* Read BD Address */
220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
222 /* Read Class of Device */
223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
225 /* Read Local Name */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
228 /* Read Voice Setting */
229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
231 /* Optional initialization */
233 /* Clear Event Filters */
234 flt_type = HCI_FLT_CLEAR_ALL;
235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
237 /* Page timeout ~20 secs */
238 param = cpu_to_le16(0x8000);
239 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
241 /* Connection accept timeout ~20 secs */
242 param = cpu_to_le16(0x7d00);
243 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
246 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
248 __u8 scan = opt;
250 BT_DBG("%s %x", hdev->name, scan);
252 /* Inquiry and Page scans */
253 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
256 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
258 __u8 auth = opt;
260 BT_DBG("%s %x", hdev->name, auth);
262 /* Authentication */
263 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
266 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
268 __u8 encrypt = opt;
270 BT_DBG("%s %x", hdev->name, encrypt);
272 /* Encryption */
273 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
276 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
278 __le16 policy = cpu_to_le16(opt);
280 BT_DBG("%s %x", hdev->name, policy);
282 /* Default link policy */
283 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
286 /* Get HCI device by index.
287 * Device is held on return. */
288 struct hci_dev *hci_dev_get(int index)
290 struct hci_dev *hdev = NULL;
291 struct list_head *p;
293 BT_DBG("%d", index);
295 if (index < 0)
296 return NULL;
298 read_lock(&hci_dev_list_lock);
299 list_for_each(p, &hci_dev_list) {
300 struct hci_dev *d = list_entry(p, struct hci_dev, list);
301 if (d->id == index) {
302 hdev = hci_dev_hold(d);
303 break;
306 read_unlock(&hci_dev_list_lock);
307 return hdev;
310 /* ---- Inquiry support ---- */
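/* Free every entry in the inquiry result cache. */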
311 static void inquiry_cache_flush(struct hci_dev *hdev)
313 struct inquiry_cache *cache = &hdev->inq_cache;
314 struct inquiry_entry *next = cache->list, *e;
316 BT_DBG("cache %p", cache);
318 cache->list = NULL;
319 while ((e = next)) {
320 next = e->next;
321 kfree(e);
325 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
327 struct inquiry_cache *cache = &hdev->inq_cache;
328 struct inquiry_entry *e;
330 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
332 for (e = cache->list; e; e = e->next)
333 if (!bacmp(&e->data.bdaddr, bdaddr))
334 break;
335 return e;
338 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
340 struct inquiry_cache *cache = &hdev->inq_cache;
341 struct inquiry_entry *e;
343 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
345 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
346 /* Entry not in the cache. Add new one. */
347 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
348 return;
349 e->next = cache->list;
350 cache->list = e;
353 memcpy(&e->data, data, sizeof(*data));
354 e->timestamp = jiffies;
355 cache->timestamp = jiffies;
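/* Copy at most num cached inquiry results into buf as struct inquiry_info; returns the number copied. */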
358 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
360 struct inquiry_cache *cache = &hdev->inq_cache;
361 struct inquiry_info *info = (struct inquiry_info *) buf;
362 struct inquiry_entry *e;
363 int copied = 0;
365 for (e = cache->list; e && copied < num; e = e->next, copied++) {
366 struct inquiry_data *data = &e->data;
367 bacpy(&info->bdaddr, &data->bdaddr);
368 info->pscan_rep_mode = data->pscan_rep_mode;
369 info->pscan_period_mode = data->pscan_period_mode;
370 info->pscan_mode = data->pscan_mode;
371 memcpy(info->dev_class, data->dev_class, 3);
372 info->clock_offset = data->clock_offset;
373 info++;
376 BT_DBG("cache %p, copied %d", cache, copied);
377 return copied;
380 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
382 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
383 struct hci_cp_inquiry cp;
385 BT_DBG("%s", hdev->name);
387 if (test_bit(HCI_INQUIRY, &hdev->flags))
388 return;
390 /* Start Inquiry */
391 memcpy(&cp.lap, &ir->lap, 3);
392 cp.length = ir->length;
393 cp.num_rsp = ir->num_rsp;
394 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
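/* HCIINQUIRY ioctl: flush a stale cache, run a fresh inquiry if needed and copy the cached results to user space. */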
397 int hci_inquiry(void __user *arg)
399 __u8 __user *ptr = arg;
400 struct hci_inquiry_req ir;
401 struct hci_dev *hdev;
402 int err = 0, do_inquiry = 0, max_rsp;
403 long timeo;
404 __u8 *buf;
406 if (copy_from_user(&ir, ptr, sizeof(ir)))
407 return -EFAULT;
409 if (!(hdev = hci_dev_get(ir.dev_id)))
410 return -ENODEV;
412 hci_dev_lock_bh(hdev);
413 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
414 inquiry_cache_empty(hdev) ||
415 ir.flags & IREQ_CACHE_FLUSH) {
416 inquiry_cache_flush(hdev);
417 do_inquiry = 1;
419 hci_dev_unlock_bh(hdev);
421 timeo = ir.length * msecs_to_jiffies(2000);
422 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
423 goto done;
425 	/* For an unlimited number of responses, use a buffer with 255 entries */
426 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
428 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer and then
429 	 * copy it to user space.
430 	 */
431 if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
432 err = -ENOMEM;
433 goto done;
436 hci_dev_lock_bh(hdev);
437 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
438 hci_dev_unlock_bh(hdev);
440 BT_DBG("num_rsp %d", ir.num_rsp);
442 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
443 ptr += sizeof(ir);
444 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
445 ir.num_rsp))
446 err = -EFAULT;
447 } else
448 err = -EFAULT;
450 kfree(buf);
452 done:
453 hci_dev_put(hdev);
454 return err;
457 /* ---- HCI ioctl helpers ---- */
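/* Bring a device up: open the driver and, unless the device is raw, run the HCI init sequence before setting HCI_UP. */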
459 int hci_dev_open(__u16 dev)
461 struct hci_dev *hdev;
462 int ret = 0;
464 if (!(hdev = hci_dev_get(dev)))
465 return -ENODEV;
467 BT_DBG("%s %p", hdev->name, hdev);
469 hci_req_lock(hdev);
471 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
472 ret = -ERFKILL;
473 goto done;
476 if (test_bit(HCI_UP, &hdev->flags)) {
477 ret = -EALREADY;
478 goto done;
481 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
482 set_bit(HCI_RAW, &hdev->flags);
484 /* Treat all non BR/EDR controllers as raw devices for now */
485 if (hdev->dev_type != HCI_BREDR)
486 set_bit(HCI_RAW, &hdev->flags);
488 if (hdev->open(hdev)) {
489 ret = -EIO;
490 goto done;
493 if (!test_bit(HCI_RAW, &hdev->flags)) {
494 atomic_set(&hdev->cmd_cnt, 1);
495 set_bit(HCI_INIT, &hdev->flags);
497 //__hci_request(hdev, hci_reset_req, 0, HZ);
498 ret = __hci_request(hdev, hci_init_req, 0,
499 msecs_to_jiffies(HCI_INIT_TIMEOUT));
501 clear_bit(HCI_INIT, &hdev->flags);
504 if (!ret) {
505 hci_dev_hold(hdev);
506 set_bit(HCI_UP, &hdev->flags);
507 hci_notify(hdev, HCI_DEV_UP);
508 } else {
509 /* Init failed, cleanup */
510 tasklet_kill(&hdev->rx_task);
511 tasklet_kill(&hdev->tx_task);
512 tasklet_kill(&hdev->cmd_task);
514 skb_queue_purge(&hdev->cmd_q);
515 skb_queue_purge(&hdev->rx_q);
517 if (hdev->flush)
518 hdev->flush(hdev);
520 if (hdev->sent_cmd) {
521 kfree_skb(hdev->sent_cmd);
522 hdev->sent_cmd = NULL;
525 hdev->close(hdev);
526 hdev->flags = 0;
529 done:
530 hci_req_unlock(hdev);
531 hci_dev_put(hdev);
532 return ret;
535 static int hci_dev_do_close(struct hci_dev *hdev)
537 BT_DBG("%s %p", hdev->name, hdev);
539 hci_req_cancel(hdev, ENODEV);
540 hci_req_lock(hdev);
542 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
543 hci_req_unlock(hdev);
544 return 0;
547 /* Kill RX and TX tasks */
548 tasklet_kill(&hdev->rx_task);
549 tasklet_kill(&hdev->tx_task);
551 hci_dev_lock_bh(hdev);
552 inquiry_cache_flush(hdev);
553 hci_conn_hash_flush(hdev);
554 hci_blacklist_clear(hdev);
555 hci_dev_unlock_bh(hdev);
557 hci_notify(hdev, HCI_DEV_DOWN);
559 if (hdev->flush)
560 hdev->flush(hdev);
562 /* Reset device */
563 skb_queue_purge(&hdev->cmd_q);
564 atomic_set(&hdev->cmd_cnt, 1);
565 if (!test_bit(HCI_RAW, &hdev->flags)) {
566 set_bit(HCI_INIT, &hdev->flags);
567 __hci_request(hdev, hci_reset_req, 0,
568 msecs_to_jiffies(250));
569 clear_bit(HCI_INIT, &hdev->flags);
572 /* Kill cmd task */
573 tasklet_kill(&hdev->cmd_task);
575 /* Drop queues */
576 skb_queue_purge(&hdev->rx_q);
577 skb_queue_purge(&hdev->cmd_q);
578 skb_queue_purge(&hdev->raw_q);
580 /* Drop last sent command */
581 if (hdev->sent_cmd) {
582 kfree_skb(hdev->sent_cmd);
583 hdev->sent_cmd = NULL;
586 /* After this point our queues are empty
587 * and no tasks are scheduled. */
588 hdev->close(hdev);
590 /* Clear flags */
591 hdev->flags = 0;
593 hci_req_unlock(hdev);
595 hci_dev_put(hdev);
596 return 0;
599 int hci_dev_close(__u16 dev)
601 struct hci_dev *hdev;
602 int err;
604 if (!(hdev = hci_dev_get(dev)))
605 return -ENODEV;
606 err = hci_dev_do_close(hdev);
607 hci_dev_put(hdev);
608 return err;
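/* Reset a running device: drop queued traffic, flush caches and issue HCI_Reset (unless the device is raw). */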
611 int hci_dev_reset(__u16 dev)
613 struct hci_dev *hdev;
614 int ret = 0;
616 if (!(hdev = hci_dev_get(dev)))
617 return -ENODEV;
619 hci_req_lock(hdev);
620 tasklet_disable(&hdev->tx_task);
622 if (!test_bit(HCI_UP, &hdev->flags))
623 goto done;
625 /* Drop queues */
626 skb_queue_purge(&hdev->rx_q);
627 skb_queue_purge(&hdev->cmd_q);
629 hci_dev_lock_bh(hdev);
630 inquiry_cache_flush(hdev);
631 hci_conn_hash_flush(hdev);
632 hci_dev_unlock_bh(hdev);
634 if (hdev->flush)
635 hdev->flush(hdev);
637 atomic_set(&hdev->cmd_cnt, 1);
638 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
640 if (!test_bit(HCI_RAW, &hdev->flags))
641 ret = __hci_request(hdev, hci_reset_req, 0,
642 msecs_to_jiffies(HCI_INIT_TIMEOUT));
644 done:
645 tasklet_enable(&hdev->tx_task);
646 hci_req_unlock(hdev);
647 hci_dev_put(hdev);
648 return ret;
651 int hci_dev_reset_stat(__u16 dev)
653 struct hci_dev *hdev;
654 int ret = 0;
656 if (!(hdev = hci_dev_get(dev)))
657 return -ENODEV;
659 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
661 hci_dev_put(hdev);
663 return ret;
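/* Per-device ioctls: HCISETAUTH, HCISETENCRYPT, HCISETSCAN, HCISETLINKPOL, link mode, packet type and MTU settings. */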
666 int hci_dev_cmd(unsigned int cmd, void __user *arg)
668 struct hci_dev *hdev;
669 struct hci_dev_req dr;
670 int err = 0;
672 if (copy_from_user(&dr, arg, sizeof(dr)))
673 return -EFAULT;
675 if (!(hdev = hci_dev_get(dr.dev_id)))
676 return -ENODEV;
678 switch (cmd) {
679 case HCISETAUTH:
680 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
681 msecs_to_jiffies(HCI_INIT_TIMEOUT));
682 break;
684 case HCISETENCRYPT:
685 if (!lmp_encrypt_capable(hdev)) {
686 err = -EOPNOTSUPP;
687 break;
690 if (!test_bit(HCI_AUTH, &hdev->flags)) {
691 /* Auth must be enabled first */
692 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
693 msecs_to_jiffies(HCI_INIT_TIMEOUT));
694 if (err)
695 break;
698 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
699 msecs_to_jiffies(HCI_INIT_TIMEOUT));
700 break;
702 case HCISETSCAN:
703 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
704 msecs_to_jiffies(HCI_INIT_TIMEOUT));
705 break;
707 case HCISETLINKPOL:
708 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
709 msecs_to_jiffies(HCI_INIT_TIMEOUT));
710 break;
712 case HCISETLINKMODE:
713 hdev->link_mode = ((__u16) dr.dev_opt) &
714 (HCI_LM_MASTER | HCI_LM_ACCEPT);
715 break;
717 case HCISETPTYPE:
718 hdev->pkt_type = (__u16) dr.dev_opt;
719 break;
721 case HCISETACLMTU:
722 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
723 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
724 break;
726 case HCISETSCOMTU:
727 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
728 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
729 break;
731 default:
732 err = -EINVAL;
733 break;
736 hci_dev_put(hdev);
737 return err;
740 int hci_get_dev_list(void __user *arg)
742 struct hci_dev_list_req *dl;
743 struct hci_dev_req *dr;
744 struct list_head *p;
745 int n = 0, size, err;
746 __u16 dev_num;
748 if (get_user(dev_num, (__u16 __user *) arg))
749 return -EFAULT;
751 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
752 return -EINVAL;
754 size = sizeof(*dl) + dev_num * sizeof(*dr);
756 if (!(dl = kzalloc(size, GFP_KERNEL)))
757 return -ENOMEM;
759 dr = dl->dev_req;
761 read_lock_bh(&hci_dev_list_lock);
762 list_for_each(p, &hci_dev_list) {
763 struct hci_dev *hdev;
764 hdev = list_entry(p, struct hci_dev, list);
765 (dr + n)->dev_id = hdev->id;
766 (dr + n)->dev_opt = hdev->flags;
767 if (++n >= dev_num)
768 break;
770 read_unlock_bh(&hci_dev_list_lock);
772 dl->dev_num = n;
773 size = sizeof(*dl) + n * sizeof(*dr);
775 err = copy_to_user(arg, dl, size);
776 kfree(dl);
778 return err ? -EFAULT : 0;
781 int hci_get_dev_info(void __user *arg)
783 struct hci_dev *hdev;
784 struct hci_dev_info di;
785 int err = 0;
787 if (copy_from_user(&di, arg, sizeof(di)))
788 return -EFAULT;
790 if (!(hdev = hci_dev_get(di.dev_id)))
791 return -ENODEV;
793 strcpy(di.name, hdev->name);
794 di.bdaddr = hdev->bdaddr;
795 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
796 di.flags = hdev->flags;
797 di.pkt_type = hdev->pkt_type;
798 di.acl_mtu = hdev->acl_mtu;
799 di.acl_pkts = hdev->acl_pkts;
800 di.sco_mtu = hdev->sco_mtu;
801 di.sco_pkts = hdev->sco_pkts;
802 di.link_policy = hdev->link_policy;
803 di.link_mode = hdev->link_mode;
805 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
806 memcpy(&di.features, &hdev->features, sizeof(di.features));
808 if (copy_to_user(arg, &di, sizeof(di)))
809 err = -EFAULT;
811 hci_dev_put(hdev);
813 return err;
816 /* ---- Interface to HCI drivers ---- */
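/* rfkill callback: a blocked switch closes the device; unblocking is a no-op here. */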
818 static int hci_rfkill_set_block(void *data, bool blocked)
820 struct hci_dev *hdev = data;
822 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
824 if (!blocked)
825 return 0;
827 hci_dev_do_close(hdev);
829 return 0;
832 static const struct rfkill_ops hci_rfkill_ops = {
833 .set_block = hci_rfkill_set_block,
836 /* Alloc HCI device */
837 struct hci_dev *hci_alloc_dev(void)
839 struct hci_dev *hdev;
841 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
842 if (!hdev)
843 return NULL;
845 skb_queue_head_init(&hdev->driver_init);
847 return hdev;
849 EXPORT_SYMBOL(hci_alloc_dev);
851 /* Free HCI device */
852 void hci_free_dev(struct hci_dev *hdev)
854 skb_queue_purge(&hdev->driver_init);
856 /* will free via device release */
857 put_device(&hdev->dev);
859 EXPORT_SYMBOL(hci_free_dev);
861 /* Register HCI device */
862 int hci_register_dev(struct hci_dev *hdev)
864 struct list_head *head = &hci_dev_list, *p;
865 int i, id = 0;
867 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
868 hdev->bus, hdev->owner);
870 if (!hdev->open || !hdev->close || !hdev->destruct)
871 return -EINVAL;
873 write_lock_bh(&hci_dev_list_lock);
875 /* Find first available device id */
876 list_for_each(p, &hci_dev_list) {
877 if (list_entry(p, struct hci_dev, list)->id != id)
878 break;
879 head = p; id++;
882 sprintf(hdev->name, "hci%d", id);
883 hdev->id = id;
884 list_add(&hdev->list, head);
886 atomic_set(&hdev->refcnt, 1);
887 spin_lock_init(&hdev->lock);
889 hdev->flags = 0;
890 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
891 hdev->esco_type = (ESCO_HV1);
892 hdev->link_mode = (HCI_LM_ACCEPT);
894 hdev->idle_timeout = 0;
895 hdev->sniff_max_interval = 800;
896 hdev->sniff_min_interval = 80;
898 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
899 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
900 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
902 skb_queue_head_init(&hdev->rx_q);
903 skb_queue_head_init(&hdev->cmd_q);
904 skb_queue_head_init(&hdev->raw_q);
906 for (i = 0; i < NUM_REASSEMBLY; i++)
907 hdev->reassembly[i] = NULL;
909 init_waitqueue_head(&hdev->req_wait_q);
910 mutex_init(&hdev->req_lock);
912 inquiry_cache_init(hdev);
914 hci_conn_hash_init(hdev);
916 INIT_LIST_HEAD(&hdev->blacklist);
918 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
920 atomic_set(&hdev->promisc, 0);
922 write_unlock_bh(&hci_dev_list_lock);
924 hdev->workqueue = create_singlethread_workqueue(hdev->name);
925 if (!hdev->workqueue)
926 goto nomem;
928 hci_register_sysfs(hdev);
930 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
931 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
932 if (hdev->rfkill) {
933 if (rfkill_register(hdev->rfkill) < 0) {
934 rfkill_destroy(hdev->rfkill);
935 hdev->rfkill = NULL;
939 hci_notify(hdev, HCI_DEV_REG);
941 return id;
943 nomem:
944 write_lock_bh(&hci_dev_list_lock);
945 list_del(&hdev->list);
946 write_unlock_bh(&hci_dev_list_lock);
948 return -ENOMEM;
950 EXPORT_SYMBOL(hci_register_dev);
952 /* Unregister HCI device */
953 int hci_unregister_dev(struct hci_dev *hdev)
955 int i;
957 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
959 write_lock_bh(&hci_dev_list_lock);
960 list_del(&hdev->list);
961 write_unlock_bh(&hci_dev_list_lock);
963 hci_dev_do_close(hdev);
965 for (i = 0; i < NUM_REASSEMBLY; i++)
966 kfree_skb(hdev->reassembly[i]);
968 hci_notify(hdev, HCI_DEV_UNREG);
970 if (hdev->rfkill) {
971 rfkill_unregister(hdev->rfkill);
972 rfkill_destroy(hdev->rfkill);
975 hci_unregister_sysfs(hdev);
977 destroy_workqueue(hdev->workqueue);
979 __hci_dev_put(hdev);
981 return 0;
983 EXPORT_SYMBOL(hci_unregister_dev);
985 /* Suspend HCI device */
986 int hci_suspend_dev(struct hci_dev *hdev)
988 hci_notify(hdev, HCI_DEV_SUSPEND);
989 return 0;
991 EXPORT_SYMBOL(hci_suspend_dev);
993 /* Resume HCI device */
994 int hci_resume_dev(struct hci_dev *hdev)
996 hci_notify(hdev, HCI_DEV_RESUME);
997 return 0;
999 EXPORT_SYMBOL(hci_resume_dev);
1001 /* Receive frame from HCI drivers */
1002 int hci_recv_frame(struct sk_buff *skb)
1004 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1005 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1006 && !test_bit(HCI_INIT, &hdev->flags))) {
1007 kfree_skb(skb);
1008 return -ENXIO;
1011 	/* Incoming skb */
1012 bt_cb(skb)->incoming = 1;
1014 /* Time stamp */
1015 __net_timestamp(skb);
1017 /* Queue frame for rx task */
1018 skb_queue_tail(&hdev->rx_q, skb);
1019 tasklet_schedule(&hdev->rx_task);
1021 return 0;
1023 EXPORT_SYMBOL(hci_recv_frame);
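/* Reassemble one HCI packet from driver fragments in hdev->reassembly[index]; returns the number of unconsumed input bytes or a negative error. */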
1025 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1026 int count, __u8 index, gfp_t gfp_mask)
1028 int len = 0;
1029 int hlen = 0;
1030 int remain = count;
1031 struct sk_buff *skb;
1032 struct bt_skb_cb *scb;
1034 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1035 index >= NUM_REASSEMBLY)
1036 return -EILSEQ;
1038 skb = hdev->reassembly[index];
1040 if (!skb) {
1041 switch (type) {
1042 case HCI_ACLDATA_PKT:
1043 len = HCI_MAX_FRAME_SIZE;
1044 hlen = HCI_ACL_HDR_SIZE;
1045 break;
1046 case HCI_EVENT_PKT:
1047 len = HCI_MAX_EVENT_SIZE;
1048 hlen = HCI_EVENT_HDR_SIZE;
1049 break;
1050 case HCI_SCODATA_PKT:
1051 len = HCI_MAX_SCO_SIZE;
1052 hlen = HCI_SCO_HDR_SIZE;
1053 break;
1056 skb = bt_skb_alloc(len, gfp_mask);
1057 if (!skb)
1058 return -ENOMEM;
1060 scb = (void *) skb->cb;
1061 scb->expect = hlen;
1062 scb->pkt_type = type;
1064 skb->dev = (void *) hdev;
1065 hdev->reassembly[index] = skb;
1068 while (count) {
1069 scb = (void *) skb->cb;
1070 len = min(scb->expect, (__u16)count);
1072 memcpy(skb_put(skb, len), data, len);
1074 count -= len;
1075 data += len;
1076 scb->expect -= len;
1077 remain = count;
1079 switch (type) {
1080 case HCI_EVENT_PKT:
1081 if (skb->len == HCI_EVENT_HDR_SIZE) {
1082 struct hci_event_hdr *h = hci_event_hdr(skb);
1083 scb->expect = h->plen;
1085 if (skb_tailroom(skb) < scb->expect) {
1086 kfree_skb(skb);
1087 hdev->reassembly[index] = NULL;
1088 return -ENOMEM;
1091 break;
1093 case HCI_ACLDATA_PKT:
1094 if (skb->len == HCI_ACL_HDR_SIZE) {
1095 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1096 scb->expect = __le16_to_cpu(h->dlen);
1098 if (skb_tailroom(skb) < scb->expect) {
1099 kfree_skb(skb);
1100 hdev->reassembly[index] = NULL;
1101 return -ENOMEM;
1104 break;
1106 case HCI_SCODATA_PKT:
1107 if (skb->len == HCI_SCO_HDR_SIZE) {
1108 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1109 scb->expect = h->dlen;
1111 if (skb_tailroom(skb) < scb->expect) {
1112 kfree_skb(skb);
1113 hdev->reassembly[index] = NULL;
1114 return -ENOMEM;
1117 break;
1120 if (scb->expect == 0) {
1121 /* Complete frame */
1123 bt_cb(skb)->pkt_type = type;
1124 hci_recv_frame(skb);
1126 hdev->reassembly[index] = NULL;
1127 return remain;
1131 return remain;
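/* Feed a buffer of fragments of a known packet type into the matching reassembly slot. */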
1134 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1136 int rem = 0;
1138 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1139 return -EILSEQ;
1141 while (count) {
1142 rem = hci_reassembly(hdev, type, data, count,
1143 type - 1, GFP_ATOMIC);
1144 if (rem < 0)
1145 return rem;
1147 data += (count - rem);
1148 count = rem;
1151 return rem;
1153 EXPORT_SYMBOL(hci_recv_fragment);
1155 #define STREAM_REASSEMBLY 0
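/* Stream reassembly for byte-stream drivers: the first byte of each frame carries the packet type. */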
1157 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1159 int type;
1160 int rem = 0;
1162 while (count) {
1163 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1165 if (!skb) {
1166 struct { char type; } *pkt;
1168 /* Start of the frame */
1169 pkt = data;
1170 type = pkt->type;
1172 data++;
1173 count--;
1174 } else
1175 type = bt_cb(skb)->pkt_type;
1177 rem = hci_reassembly(hdev, type, data,
1178 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1179 if (rem < 0)
1180 return rem;
1182 data += (count - rem);
1183 count = rem;
1186 return rem;
1188 EXPORT_SYMBOL(hci_recv_stream_fragment);
1190 /* ---- Interface to upper protocols ---- */
1192 /* Register/Unregister protocols.
1193 * hci_task_lock is used to ensure that no tasks are running. */
1194 int hci_register_proto(struct hci_proto *hp)
1196 int err = 0;
1198 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1200 if (hp->id >= HCI_MAX_PROTO)
1201 return -EINVAL;
1203 write_lock_bh(&hci_task_lock);
1205 if (!hci_proto[hp->id])
1206 hci_proto[hp->id] = hp;
1207 else
1208 err = -EEXIST;
1210 write_unlock_bh(&hci_task_lock);
1212 return err;
1214 EXPORT_SYMBOL(hci_register_proto);
1216 int hci_unregister_proto(struct hci_proto *hp)
1218 int err = 0;
1220 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1222 if (hp->id >= HCI_MAX_PROTO)
1223 return -EINVAL;
1225 write_lock_bh(&hci_task_lock);
1227 if (hci_proto[hp->id])
1228 hci_proto[hp->id] = NULL;
1229 else
1230 err = -ENOENT;
1232 write_unlock_bh(&hci_task_lock);
1234 return err;
1236 EXPORT_SYMBOL(hci_unregister_proto);
1238 int hci_register_cb(struct hci_cb *cb)
1240 BT_DBG("%p name %s", cb, cb->name);
1242 write_lock_bh(&hci_cb_list_lock);
1243 list_add(&cb->list, &hci_cb_list);
1244 write_unlock_bh(&hci_cb_list_lock);
1246 return 0;
1248 EXPORT_SYMBOL(hci_register_cb);
1250 int hci_unregister_cb(struct hci_cb *cb)
1252 BT_DBG("%p name %s", cb, cb->name);
1254 write_lock_bh(&hci_cb_list_lock);
1255 list_del(&cb->list);
1256 write_unlock_bh(&hci_cb_list_lock);
1258 return 0;
1260 EXPORT_SYMBOL(hci_unregister_cb);
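/* Hand one frame to the driver; in promiscuous mode a timestamped copy goes to the HCI sockets first. */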
1262 static int hci_send_frame(struct sk_buff *skb)
1264 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1266 if (!hdev) {
1267 kfree_skb(skb);
1268 return -ENODEV;
1271 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1273 if (atomic_read(&hdev->promisc)) {
1274 /* Time stamp */
1275 __net_timestamp(skb);
1277 hci_send_to_sock(hdev, skb);
1280 /* Get rid of skb owner, prior to sending to the driver. */
1281 skb_orphan(skb);
1283 return hdev->send(skb);
1286 /* Send HCI command */
1287 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1289 int len = HCI_COMMAND_HDR_SIZE + plen;
1290 struct hci_command_hdr *hdr;
1291 struct sk_buff *skb;
1293 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1295 skb = bt_skb_alloc(len, GFP_ATOMIC);
1296 if (!skb) {
1297 BT_ERR("%s no memory for command", hdev->name);
1298 return -ENOMEM;
1301 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1302 hdr->opcode = cpu_to_le16(opcode);
1303 hdr->plen = plen;
1305 if (plen)
1306 memcpy(skb_put(skb, plen), param, plen);
1308 BT_DBG("skb len %d", skb->len);
1310 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1311 skb->dev = (void *) hdev;
1313 skb_queue_tail(&hdev->cmd_q, skb);
1314 tasklet_schedule(&hdev->cmd_task);
1316 return 0;
1319 /* Get data from the previously sent command */
1320 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1322 struct hci_command_hdr *hdr;
1324 if (!hdev->sent_cmd)
1325 return NULL;
1327 hdr = (void *) hdev->sent_cmd->data;
1329 if (hdr->opcode != cpu_to_le16(opcode))
1330 return NULL;
1332 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1334 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1337 /* Send ACL data */
1338 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1340 struct hci_acl_hdr *hdr;
1341 int len = skb->len;
1343 skb_push(skb, HCI_ACL_HDR_SIZE);
1344 skb_reset_transport_header(skb);
1345 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1346 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1347 hdr->dlen = cpu_to_le16(len);
1350 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1352 struct hci_dev *hdev = conn->hdev;
1353 struct sk_buff *list;
1355 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1357 skb->dev = (void *) hdev;
1358 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1359 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1361 if (!(list = skb_shinfo(skb)->frag_list)) {
1362 		/* Non-fragmented */
1363 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1365 skb_queue_tail(&conn->data_q, skb);
1366 } else {
1367 /* Fragmented */
1368 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1370 skb_shinfo(skb)->frag_list = NULL;
1372 /* Queue all fragments atomically */
1373 spin_lock_bh(&conn->data_q.lock);
1375 __skb_queue_tail(&conn->data_q, skb);
1376 do {
1377 skb = list; list = list->next;
1379 skb->dev = (void *) hdev;
1380 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1381 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1383 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1385 __skb_queue_tail(&conn->data_q, skb);
1386 } while (list);
1388 spin_unlock_bh(&conn->data_q.lock);
1391 tasklet_schedule(&hdev->tx_task);
1393 EXPORT_SYMBOL(hci_send_acl);
1395 /* Send SCO data */
1396 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1398 struct hci_dev *hdev = conn->hdev;
1399 struct hci_sco_hdr hdr;
1401 BT_DBG("%s len %d", hdev->name, skb->len);
1403 hdr.handle = cpu_to_le16(conn->handle);
1404 hdr.dlen = skb->len;
1406 skb_push(skb, HCI_SCO_HDR_SIZE);
1407 skb_reset_transport_header(skb);
1408 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1410 skb->dev = (void *) hdev;
1411 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1413 skb_queue_tail(&conn->data_q, skb);
1414 tasklet_schedule(&hdev->tx_task);
1416 EXPORT_SYMBOL(hci_send_sco);
1418 /* ---- HCI TX task (outgoing data) ---- */
1420 /* HCI Connection scheduler */
1421 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1423 struct hci_conn_hash *h = &hdev->conn_hash;
1424 struct hci_conn *conn = NULL;
1425 int num = 0, min = ~0;
1426 struct list_head *p;
1428 /* We don't have to lock device here. Connections are always
1429 * added and removed with TX task disabled. */
1430 list_for_each(p, &h->list) {
1431 struct hci_conn *c;
1432 c = list_entry(p, struct hci_conn, list);
1434 if (c->type != type || skb_queue_empty(&c->data_q))
1435 continue;
1437 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1438 continue;
1440 num++;
1442 if (c->sent < min) {
1443 min = c->sent;
1444 conn = c;
1448 if (conn) {
1449 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1450 int q = cnt / num;
1451 *quote = q ? q : 1;
1452 } else
1453 *quote = 0;
1455 BT_DBG("conn %p quote %d", conn, *quote);
1456 return conn;
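/* ACL tx timeout: disconnect every ACL link that still has unacknowledged packets. */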
1459 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1461 struct hci_conn_hash *h = &hdev->conn_hash;
1462 struct list_head *p;
1463 struct hci_conn *c;
1465 BT_ERR("%s ACL tx timeout", hdev->name);
1467 /* Kill stalled connections */
1468 list_for_each(p, &h->list) {
1469 c = list_entry(p, struct hci_conn, list);
1470 if (c->type == ACL_LINK && c->sent) {
1471 BT_ERR("%s killing stalled ACL connection %s",
1472 hdev->name, batostr(&c->dst));
1473 hci_acl_disconn(c, 0x13);
1478 static inline void hci_sched_acl(struct hci_dev *hdev)
1480 struct hci_conn *conn;
1481 struct sk_buff *skb;
1482 int quote;
1484 BT_DBG("%s", hdev->name);
1486 if (!test_bit(HCI_RAW, &hdev->flags)) {
1487 /* ACL tx timeout must be longer than maximum
1488 * link supervision timeout (40.9 seconds) */
1489 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1490 hci_acl_tx_to(hdev);
1493 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1494 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1495 BT_DBG("skb %p len %d", skb, skb->len);
1497 hci_conn_enter_active_mode(conn);
1499 hci_send_frame(skb);
1500 hdev->acl_last_tx = jiffies;
1502 hdev->acl_cnt--;
1503 conn->sent++;
1508 /* Schedule SCO */
1509 static inline void hci_sched_sco(struct hci_dev *hdev)
1511 struct hci_conn *conn;
1512 struct sk_buff *skb;
1513 int quote;
1515 BT_DBG("%s", hdev->name);
1517 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1518 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1519 BT_DBG("skb %p len %d", skb, skb->len);
1520 hci_send_frame(skb);
1522 conn->sent++;
1523 if (conn->sent == ~0)
1524 conn->sent = 0;
1529 static inline void hci_sched_esco(struct hci_dev *hdev)
1531 struct hci_conn *conn;
1532 struct sk_buff *skb;
1533 int quote;
1535 BT_DBG("%s", hdev->name);
1537 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1538 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1539 BT_DBG("skb %p len %d", skb, skb->len);
1540 hci_send_frame(skb);
1542 conn->sent++;
1543 if (conn->sent == ~0)
1544 conn->sent = 0;
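/* TX tasklet: schedule the ACL, SCO and eSCO queues, then flush any raw packets. */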
1549 static void hci_tx_task(unsigned long arg)
1551 struct hci_dev *hdev = (struct hci_dev *) arg;
1552 struct sk_buff *skb;
1554 read_lock(&hci_task_lock);
1556 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1558 /* Schedule queues and send stuff to HCI driver */
1560 hci_sched_acl(hdev);
1562 hci_sched_sco(hdev);
1564 hci_sched_esco(hdev);
1566 /* Send next queued raw (unknown type) packet */
1567 while ((skb = skb_dequeue(&hdev->raw_q)))
1568 hci_send_frame(skb);
1570 read_unlock(&hci_task_lock);
1573 /* ----- HCI RX task (incoming data processing) ----- */
1575 /* ACL data packet */
1576 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1578 struct hci_acl_hdr *hdr = (void *) skb->data;
1579 struct hci_conn *conn;
1580 __u16 handle, flags;
1582 skb_pull(skb, HCI_ACL_HDR_SIZE);
1584 handle = __le16_to_cpu(hdr->handle);
1585 flags = hci_flags(handle);
1586 handle = hci_handle(handle);
1588 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1590 hdev->stat.acl_rx++;
1592 hci_dev_lock(hdev);
1593 conn = hci_conn_hash_lookup_handle(hdev, handle);
1594 hci_dev_unlock(hdev);
1596 if (conn) {
1597 register struct hci_proto *hp;
1599 hci_conn_enter_active_mode(conn);
1601 /* Send to upper protocol */
1602 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1603 hp->recv_acldata(conn, skb, flags);
1604 return;
1606 } else {
1607 BT_ERR("%s ACL packet for unknown connection handle %d",
1608 hdev->name, handle);
1611 kfree_skb(skb);
1614 /* SCO data packet */
1615 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1617 struct hci_sco_hdr *hdr = (void *) skb->data;
1618 struct hci_conn *conn;
1619 __u16 handle;
1621 skb_pull(skb, HCI_SCO_HDR_SIZE);
1623 handle = __le16_to_cpu(hdr->handle);
1625 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1627 hdev->stat.sco_rx++;
1629 hci_dev_lock(hdev);
1630 conn = hci_conn_hash_lookup_handle(hdev, handle);
1631 hci_dev_unlock(hdev);
1633 if (conn) {
1634 register struct hci_proto *hp;
1636 /* Send to upper protocol */
1637 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1638 hp->recv_scodata(conn, skb);
1639 return;
1641 } else {
1642 BT_ERR("%s SCO packet for unknown connection handle %d",
1643 hdev->name, handle);
1646 kfree_skb(skb);
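/* RX tasklet: copy frames to monitoring sockets when in promiscuous mode, then dispatch event, ACL and SCO packets to their handlers. */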
1649 static void hci_rx_task(unsigned long arg)
1651 struct hci_dev *hdev = (struct hci_dev *) arg;
1652 struct sk_buff *skb;
1654 BT_DBG("%s", hdev->name);
1656 read_lock(&hci_task_lock);
1658 while ((skb = skb_dequeue(&hdev->rx_q))) {
1659 if (atomic_read(&hdev->promisc)) {
1660 /* Send copy to the sockets */
1661 hci_send_to_sock(hdev, skb);
1664 if (test_bit(HCI_RAW, &hdev->flags)) {
1665 kfree_skb(skb);
1666 continue;
1669 if (test_bit(HCI_INIT, &hdev->flags)) {
1670 			/* Don't process data packets in this state. */
1671 switch (bt_cb(skb)->pkt_type) {
1672 case HCI_ACLDATA_PKT:
1673 case HCI_SCODATA_PKT:
1674 kfree_skb(skb);
1675 continue;
1679 /* Process frame */
1680 switch (bt_cb(skb)->pkt_type) {
1681 case HCI_EVENT_PKT:
1682 hci_event_packet(hdev, skb);
1683 break;
1685 case HCI_ACLDATA_PKT:
1686 BT_DBG("%s ACL data packet", hdev->name);
1687 hci_acldata_packet(hdev, skb);
1688 break;
1690 case HCI_SCODATA_PKT:
1691 BT_DBG("%s SCO data packet", hdev->name);
1692 hci_scodata_packet(hdev, skb);
1693 break;
1695 default:
1696 kfree_skb(skb);
1697 break;
1701 read_unlock(&hci_task_lock);
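/* CMD tasklet: recover from command timeouts and send the next queued command while credits remain. */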
1704 static void hci_cmd_task(unsigned long arg)
1706 struct hci_dev *hdev = (struct hci_dev *) arg;
1707 struct sk_buff *skb;
1709 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1711 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1712 BT_ERR("%s command tx timeout", hdev->name);
1713 atomic_set(&hdev->cmd_cnt, 1);
1716 /* Send queued commands */
1717 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1718 kfree_skb(hdev->sent_cmd);
1720 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1721 atomic_dec(&hdev->cmd_cnt);
1722 hci_send_frame(skb);
1723 hdev->cmd_last_tx = jiffies;
1724 } else {
1725 skb_queue_head(&hdev->cmd_q, skb);
1726 tasklet_schedule(&hdev->cmd_task);