net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
47 #include <net/sock.h>
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
56 #define AUTO_OFF_TIMEOUT 2000
58 bool enable_hs;
60 static void hci_rx_work(struct work_struct *work);
61 static void hci_cmd_work(struct work_struct *work);
62 static void hci_tx_work(struct work_struct *work);
64 /* HCI device list */
65 LIST_HEAD(hci_dev_list);
66 DEFINE_RWLOCK(hci_dev_list_lock);
68 /* HCI callback list */
69 LIST_HEAD(hci_cb_list);
70 DEFINE_RWLOCK(hci_cb_list_lock);
72 /* HCI notifiers list */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
75 /* ---- HCI notifications ---- */
77 int hci_register_notifier(struct notifier_block *nb)
79 return atomic_notifier_chain_register(&hci_notifier, nb);
82 int hci_unregister_notifier(struct notifier_block *nb)
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 static void hci_notify(struct hci_dev *hdev, int event)
89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
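/* Illustrative sketch (assumption, not part of this file): another kernel
 * module that wants to hear about HCI device events could hook the chain
 * above.  The callback name and body are hypothetical; only the
 * register/unregister helpers and the HCI_DEV_* events come from the core:
 *
 *	static int my_hci_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_REG)
 *			BT_DBG("%s registered", hdev->name);
 *
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_hci_event };
 *
 *	hci_register_notifier(&my_nb);
 *	...
 *	hci_unregister_notifier(&my_nb);
 */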
92 /* ---- HCI requests ---- */
94 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
98 /* If this is the init phase check if the completed command matches
99	 * the last init command, and if not just return. */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
102 return;
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
111 static void hci_req_cancel(struct hci_dev *hdev, int err)
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
122 /* Execute request and wait for completion. */
123 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
124 unsigned long opt, __u32 timeout)
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
129 BT_DBG("%s start", hdev->name);
131 hdev->req_status = HCI_REQ_PEND;
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
136 req(hdev, opt);
137 schedule_timeout(timeout);
139 remove_wait_queue(&hdev->req_wait_q, &wait);
141 if (signal_pending(current))
142 return -EINTR;
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
146 err = -bt_to_errno(hdev->req_result);
147 break;
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
153 default:
154 err = -ETIMEDOUT;
155 break;
158 hdev->req_status = hdev->req_result = 0;
160 BT_DBG("%s end: err %d", hdev->name, err);
162 return err;
165 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
166 unsigned long opt, __u32 timeout)
168 int ret;
170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
178 return ret;
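/* Illustrative sketch (assumption, mirrors hci_dev_cmd() further down): a
 * caller pairs a request callback that queues HCI commands with a timeout,
 * e.g.
 *
 *	err = hci_request(hdev, hci_auth_req, dr.dev_opt,
 *					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * The callback runs with the request lock held, and completion is signalled
 * from hci_req_complete() when the matching command completes; otherwise the
 * wait simply times out.
 */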
181 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
183 BT_DBG("%s %ld", hdev->name, opt);
185 /* Reset device */
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
190 static void bredr_init(struct hci_dev *hdev)
192 struct hci_cp_delete_stored_link_key cp;
193 __le16 param;
194 __u8 flt_type;
196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
198 /* Mandatory initialization */
200 /* Reset */
201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
206 /* Read Local Supported Features */
207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209 /* Read Local Version */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
215 /* Read BD Address */
216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
224 /* Read Voice Setting */
225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
227 /* Optional initialization */
229 /* Clear Event Filters */
230 flt_type = HCI_FLT_CLEAR_ALL;
231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
233 /* Connection accept timeout ~20 secs */
234 param = cpu_to_le16(0x7d00);
235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
242 static void amp_init(struct hci_dev *hdev)
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
253 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
255 struct sk_buff *skb;
257 BT_DBG("%s %ld", hdev->name, opt);
259 /* Driver initialization */
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
269 skb_queue_purge(&hdev->driver_init);
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
287 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
289 BT_DBG("%s", hdev->name);
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
295 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
297 __u8 scan = opt;
299 BT_DBG("%s %x", hdev->name, scan);
301 /* Inquiry and Page scans */
302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
305 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
307 __u8 auth = opt;
309 BT_DBG("%s %x", hdev->name, auth);
311 /* Authentication */
312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
315 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
317 __u8 encrypt = opt;
319 BT_DBG("%s %x", hdev->name, encrypt);
321 /* Encryption */
322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
325 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
327 __le16 policy = cpu_to_le16(opt);
329 BT_DBG("%s %x", hdev->name, policy);
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
335 /* Get HCI device by index.
336 * Device is held on return. */
337 struct hci_dev *hci_dev_get(int index)
339 struct hci_dev *hdev = NULL, *d;
341 BT_DBG("%d", index);
343 if (index < 0)
344 return NULL;
346 read_lock(&hci_dev_list_lock);
347 list_for_each_entry(d, &hci_dev_list, list) {
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
357 /* ---- Inquiry support ---- */
358 static void inquiry_cache_flush(struct hci_dev *hdev)
360 struct inquiry_cache *cache = &hdev->inq_cache;
361 struct inquiry_entry *next = cache->list, *e;
363 BT_DBG("cache %p", cache);
365 cache->list = NULL;
366 while ((e = next)) {
367 next = e->next;
368 kfree(e);
372 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
374 struct inquiry_cache *cache = &hdev->inq_cache;
375 struct inquiry_entry *e;
377 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
379 for (e = cache->list; e; e = e->next)
380 if (!bacmp(&e->data.bdaddr, bdaddr))
381 break;
382 return e;
385 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
387 struct inquiry_cache *cache = &hdev->inq_cache;
388 struct inquiry_entry *ie;
390 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
392 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
393 if (!ie) {
394 /* Entry not in the cache. Add new one. */
395 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
396 if (!ie)
397 return;
399 ie->next = cache->list;
400 cache->list = ie;
403 memcpy(&ie->data, data, sizeof(*data));
404 ie->timestamp = jiffies;
405 cache->timestamp = jiffies;
408 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
410 struct inquiry_cache *cache = &hdev->inq_cache;
411 struct inquiry_info *info = (struct inquiry_info *) buf;
412 struct inquiry_entry *e;
413 int copied = 0;
415 for (e = cache->list; e && copied < num; e = e->next, copied++) {
416 struct inquiry_data *data = &e->data;
417 bacpy(&info->bdaddr, &data->bdaddr);
418 info->pscan_rep_mode = data->pscan_rep_mode;
419 info->pscan_period_mode = data->pscan_period_mode;
420 info->pscan_mode = data->pscan_mode;
421 memcpy(info->dev_class, data->dev_class, 3);
422 info->clock_offset = data->clock_offset;
423 info++;
426 BT_DBG("cache %p, copied %d", cache, copied);
427 return copied;
430 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
432 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
433 struct hci_cp_inquiry cp;
435 BT_DBG("%s", hdev->name);
437 if (test_bit(HCI_INQUIRY, &hdev->flags))
438 return;
440 /* Start Inquiry */
441 memcpy(&cp.lap, &ir->lap, 3);
442 cp.length = ir->length;
443 cp.num_rsp = ir->num_rsp;
444 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
447 int hci_inquiry(void __user *arg)
449 __u8 __user *ptr = arg;
450 struct hci_inquiry_req ir;
451 struct hci_dev *hdev;
452 int err = 0, do_inquiry = 0, max_rsp;
453 long timeo;
454 __u8 *buf;
456 if (copy_from_user(&ir, ptr, sizeof(ir)))
457 return -EFAULT;
459 hdev = hci_dev_get(ir.dev_id);
460 if (!hdev)
461 return -ENODEV;
463 hci_dev_lock(hdev);
464 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
465 inquiry_cache_empty(hdev) ||
466 ir.flags & IREQ_CACHE_FLUSH) {
467 inquiry_cache_flush(hdev);
468 do_inquiry = 1;
470 hci_dev_unlock(hdev);
472 timeo = ir.length * msecs_to_jiffies(2000);
474 if (do_inquiry) {
475 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
476 if (err < 0)
477 goto done;
480	/* For an unlimited number of responses we will use a buffer with 255 entries */
481 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
483	/* cache_dump can't sleep. Therefore we allocate a temporary buffer and then
484	 * copy it to user space. */
486 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
487 if (!buf) {
488 err = -ENOMEM;
489 goto done;
492 hci_dev_lock(hdev);
493 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
494 hci_dev_unlock(hdev);
496 BT_DBG("num_rsp %d", ir.num_rsp);
498 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
499 ptr += sizeof(ir);
500 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
501 ir.num_rsp))
502 err = -EFAULT;
503 } else
504 err = -EFAULT;
506 kfree(buf);
508 done:
509 hci_dev_put(hdev);
510 return err;
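/* Illustrative sketch (assumption): this function backs the HCIINQUIRY ioctl
 * on HCI sockets, so the user-space side looks roughly like the snippet
 * below; buf must be large enough to hold the request header plus num_rsp
 * inquiry_info entries:
 *
 *	struct hci_inquiry_req *ir = (void *) buf;
 *
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->length  = 8;
 *	ir->num_rsp = 0;
 *	memcpy(ir->lap, lap, 3);
 *
 *	ioctl(sk, HCIINQUIRY, buf);
 */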
513 /* ---- HCI ioctl helpers ---- */
515 int hci_dev_open(__u16 dev)
517 struct hci_dev *hdev;
518 int ret = 0;
520 hdev = hci_dev_get(dev);
521 if (!hdev)
522 return -ENODEV;
524 BT_DBG("%s %p", hdev->name, hdev);
526 hci_req_lock(hdev);
528 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
529 ret = -ERFKILL;
530 goto done;
533 if (test_bit(HCI_UP, &hdev->flags)) {
534 ret = -EALREADY;
535 goto done;
538 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
539 set_bit(HCI_RAW, &hdev->flags);
541	/* Treat all non-BR/EDR controllers as raw devices if
542	   enable_hs is not set */
543 if (hdev->dev_type != HCI_BREDR && !enable_hs)
544 set_bit(HCI_RAW, &hdev->flags);
546 if (hdev->open(hdev)) {
547 ret = -EIO;
548 goto done;
551 if (!test_bit(HCI_RAW, &hdev->flags)) {
552 atomic_set(&hdev->cmd_cnt, 1);
553 set_bit(HCI_INIT, &hdev->flags);
554 hdev->init_last_cmd = 0;
556 ret = __hci_request(hdev, hci_init_req, 0,
557 msecs_to_jiffies(HCI_INIT_TIMEOUT));
559 if (lmp_host_le_capable(hdev))
560 ret = __hci_request(hdev, hci_le_init_req, 0,
561 msecs_to_jiffies(HCI_INIT_TIMEOUT));
563 clear_bit(HCI_INIT, &hdev->flags);
566 if (!ret) {
567 hci_dev_hold(hdev);
568 set_bit(HCI_UP, &hdev->flags);
569 hci_notify(hdev, HCI_DEV_UP);
570 if (!test_bit(HCI_SETUP, &hdev->flags)) {
571 hci_dev_lock(hdev);
572 mgmt_powered(hdev, 1);
573 hci_dev_unlock(hdev);
575 } else {
576 /* Init failed, cleanup */
577 flush_work(&hdev->tx_work);
578 flush_work(&hdev->cmd_work);
579 flush_work(&hdev->rx_work);
581 skb_queue_purge(&hdev->cmd_q);
582 skb_queue_purge(&hdev->rx_q);
584 if (hdev->flush)
585 hdev->flush(hdev);
587 if (hdev->sent_cmd) {
588 kfree_skb(hdev->sent_cmd);
589 hdev->sent_cmd = NULL;
592 hdev->close(hdev);
593 hdev->flags = 0;
596 done:
597 hci_req_unlock(hdev);
598 hci_dev_put(hdev);
599 return ret;
602 static int hci_dev_do_close(struct hci_dev *hdev)
604 BT_DBG("%s %p", hdev->name, hdev);
606 hci_req_cancel(hdev, ENODEV);
607 hci_req_lock(hdev);
609 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
610 del_timer_sync(&hdev->cmd_timer);
611 hci_req_unlock(hdev);
612 return 0;
615 /* Flush RX and TX works */
616 flush_work(&hdev->tx_work);
617 flush_work(&hdev->rx_work);
619 if (hdev->discov_timeout > 0) {
620 cancel_delayed_work(&hdev->discov_off);
621 hdev->discov_timeout = 0;
624 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
625 cancel_delayed_work(&hdev->power_off);
627 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
628 cancel_delayed_work(&hdev->service_cache);
630 hci_dev_lock(hdev);
631 inquiry_cache_flush(hdev);
632 hci_conn_hash_flush(hdev);
633 hci_dev_unlock(hdev);
635 hci_notify(hdev, HCI_DEV_DOWN);
637 if (hdev->flush)
638 hdev->flush(hdev);
640 /* Reset device */
641 skb_queue_purge(&hdev->cmd_q);
642 atomic_set(&hdev->cmd_cnt, 1);
643 if (!test_bit(HCI_RAW, &hdev->flags) &&
644 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
645 set_bit(HCI_INIT, &hdev->flags);
646 __hci_request(hdev, hci_reset_req, 0,
647 msecs_to_jiffies(250));
648 clear_bit(HCI_INIT, &hdev->flags);
651 /* flush cmd work */
652 flush_work(&hdev->cmd_work);
654 /* Drop queues */
655 skb_queue_purge(&hdev->rx_q);
656 skb_queue_purge(&hdev->cmd_q);
657 skb_queue_purge(&hdev->raw_q);
659 /* Drop last sent command */
660 if (hdev->sent_cmd) {
661 del_timer_sync(&hdev->cmd_timer);
662 kfree_skb(hdev->sent_cmd);
663 hdev->sent_cmd = NULL;
666 /* After this point our queues are empty
667 * and no tasks are scheduled. */
668 hdev->close(hdev);
670 hci_dev_lock(hdev);
671 mgmt_powered(hdev, 0);
672 hci_dev_unlock(hdev);
674 /* Clear flags */
675 hdev->flags = 0;
677 hci_req_unlock(hdev);
679 hci_dev_put(hdev);
680 return 0;
683 int hci_dev_close(__u16 dev)
685 struct hci_dev *hdev;
686 int err;
688 hdev = hci_dev_get(dev);
689 if (!hdev)
690 return -ENODEV;
691 err = hci_dev_do_close(hdev);
692 hci_dev_put(hdev);
693 return err;
696 int hci_dev_reset(__u16 dev)
698 struct hci_dev *hdev;
699 int ret = 0;
701 hdev = hci_dev_get(dev);
702 if (!hdev)
703 return -ENODEV;
705 hci_req_lock(hdev);
707 if (!test_bit(HCI_UP, &hdev->flags))
708 goto done;
710 /* Drop queues */
711 skb_queue_purge(&hdev->rx_q);
712 skb_queue_purge(&hdev->cmd_q);
714 hci_dev_lock(hdev);
715 inquiry_cache_flush(hdev);
716 hci_conn_hash_flush(hdev);
717 hci_dev_unlock(hdev);
719 if (hdev->flush)
720 hdev->flush(hdev);
722 atomic_set(&hdev->cmd_cnt, 1);
723 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
725 if (!test_bit(HCI_RAW, &hdev->flags))
726 ret = __hci_request(hdev, hci_reset_req, 0,
727 msecs_to_jiffies(HCI_INIT_TIMEOUT));
729 done:
730 hci_req_unlock(hdev);
731 hci_dev_put(hdev);
732 return ret;
735 int hci_dev_reset_stat(__u16 dev)
737 struct hci_dev *hdev;
738 int ret = 0;
740 hdev = hci_dev_get(dev);
741 if (!hdev)
742 return -ENODEV;
744 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
746 hci_dev_put(hdev);
748 return ret;
751 int hci_dev_cmd(unsigned int cmd, void __user *arg)
753 struct hci_dev *hdev;
754 struct hci_dev_req dr;
755 int err = 0;
757 if (copy_from_user(&dr, arg, sizeof(dr)))
758 return -EFAULT;
760 hdev = hci_dev_get(dr.dev_id);
761 if (!hdev)
762 return -ENODEV;
764 switch (cmd) {
765 case HCISETAUTH:
766 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
767 msecs_to_jiffies(HCI_INIT_TIMEOUT));
768 break;
770 case HCISETENCRYPT:
771 if (!lmp_encrypt_capable(hdev)) {
772 err = -EOPNOTSUPP;
773 break;
776 if (!test_bit(HCI_AUTH, &hdev->flags)) {
777 /* Auth must be enabled first */
778 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
779 msecs_to_jiffies(HCI_INIT_TIMEOUT));
780 if (err)
781 break;
784 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
785 msecs_to_jiffies(HCI_INIT_TIMEOUT));
786 break;
788 case HCISETSCAN:
789 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
790 msecs_to_jiffies(HCI_INIT_TIMEOUT));
791 break;
793 case HCISETLINKPOL:
794 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
795 msecs_to_jiffies(HCI_INIT_TIMEOUT));
796 break;
798 case HCISETLINKMODE:
799 hdev->link_mode = ((__u16) dr.dev_opt) &
800 (HCI_LM_MASTER | HCI_LM_ACCEPT);
801 break;
803 case HCISETPTYPE:
804 hdev->pkt_type = (__u16) dr.dev_opt;
805 break;
807 case HCISETACLMTU:
808 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
809 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
810 break;
812 case HCISETSCOMTU:
813 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
814 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
815 break;
817 default:
818 err = -EINVAL;
819 break;
822 hci_dev_put(hdev);
823 return err;
826 int hci_get_dev_list(void __user *arg)
828 struct hci_dev *hdev;
829 struct hci_dev_list_req *dl;
830 struct hci_dev_req *dr;
831 int n = 0, size, err;
832 __u16 dev_num;
834 if (get_user(dev_num, (__u16 __user *) arg))
835 return -EFAULT;
837 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
838 return -EINVAL;
840 size = sizeof(*dl) + dev_num * sizeof(*dr);
842 dl = kzalloc(size, GFP_KERNEL);
843 if (!dl)
844 return -ENOMEM;
846 dr = dl->dev_req;
848 read_lock(&hci_dev_list_lock);
849 list_for_each_entry(hdev, &hci_dev_list, list) {
850 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
851 cancel_delayed_work(&hdev->power_off);
853 if (!test_bit(HCI_MGMT, &hdev->flags))
854 set_bit(HCI_PAIRABLE, &hdev->flags);
856 (dr + n)->dev_id = hdev->id;
857 (dr + n)->dev_opt = hdev->flags;
859 if (++n >= dev_num)
860 break;
862 read_unlock(&hci_dev_list_lock);
864 dl->dev_num = n;
865 size = sizeof(*dl) + n * sizeof(*dr);
867 err = copy_to_user(arg, dl, size);
868 kfree(dl);
870 return err ? -EFAULT : 0;
873 int hci_get_dev_info(void __user *arg)
875 struct hci_dev *hdev;
876 struct hci_dev_info di;
877 int err = 0;
879 if (copy_from_user(&di, arg, sizeof(di)))
880 return -EFAULT;
882 hdev = hci_dev_get(di.dev_id);
883 if (!hdev)
884 return -ENODEV;
886 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
887 cancel_delayed_work_sync(&hdev->power_off);
889 if (!test_bit(HCI_MGMT, &hdev->flags))
890 set_bit(HCI_PAIRABLE, &hdev->flags);
892 strcpy(di.name, hdev->name);
893 di.bdaddr = hdev->bdaddr;
894 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
895 di.flags = hdev->flags;
896 di.pkt_type = hdev->pkt_type;
897 di.acl_mtu = hdev->acl_mtu;
898 di.acl_pkts = hdev->acl_pkts;
899 di.sco_mtu = hdev->sco_mtu;
900 di.sco_pkts = hdev->sco_pkts;
901 di.link_policy = hdev->link_policy;
902 di.link_mode = hdev->link_mode;
904 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
905 memcpy(&di.features, &hdev->features, sizeof(di.features));
907 if (copy_to_user(arg, &di, sizeof(di)))
908 err = -EFAULT;
910 hci_dev_put(hdev);
912 return err;
915 /* ---- Interface to HCI drivers ---- */
917 static int hci_rfkill_set_block(void *data, bool blocked)
919 struct hci_dev *hdev = data;
921 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
923 if (!blocked)
924 return 0;
926 hci_dev_do_close(hdev);
928 return 0;
931 static const struct rfkill_ops hci_rfkill_ops = {
932 .set_block = hci_rfkill_set_block,
935 /* Alloc HCI device */
936 struct hci_dev *hci_alloc_dev(void)
938 struct hci_dev *hdev;
940 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
941 if (!hdev)
942 return NULL;
944 hci_init_sysfs(hdev);
945 skb_queue_head_init(&hdev->driver_init);
947 return hdev;
949 EXPORT_SYMBOL(hci_alloc_dev);
951 /* Free HCI device */
952 void hci_free_dev(struct hci_dev *hdev)
954 skb_queue_purge(&hdev->driver_init);
956 /* will free via device release */
957 put_device(&hdev->dev);
959 EXPORT_SYMBOL(hci_free_dev);
961 static void hci_power_on(struct work_struct *work)
963 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
965 BT_DBG("%s", hdev->name);
967 if (hci_dev_open(hdev->id) < 0)
968 return;
970 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
971 schedule_delayed_work(&hdev->power_off,
972 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
974 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
975 mgmt_index_added(hdev);
978 static void hci_power_off(struct work_struct *work)
980 struct hci_dev *hdev = container_of(work, struct hci_dev,
981 power_off.work);
983 BT_DBG("%s", hdev->name);
985 clear_bit(HCI_AUTO_OFF, &hdev->flags);
987 hci_dev_close(hdev->id);
990 static void hci_discov_off(struct work_struct *work)
992 struct hci_dev *hdev;
993 u8 scan = SCAN_PAGE;
995 hdev = container_of(work, struct hci_dev, discov_off.work);
997 BT_DBG("%s", hdev->name);
999 hci_dev_lock(hdev);
1001 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1003 hdev->discov_timeout = 0;
1005 hci_dev_unlock(hdev);
1008 int hci_uuids_clear(struct hci_dev *hdev)
1010 struct list_head *p, *n;
1012 list_for_each_safe(p, n, &hdev->uuids) {
1013 struct bt_uuid *uuid;
1015 uuid = list_entry(p, struct bt_uuid, list);
1017 list_del(p);
1018 kfree(uuid);
1021 return 0;
1024 int hci_link_keys_clear(struct hci_dev *hdev)
1026 struct list_head *p, *n;
1028 list_for_each_safe(p, n, &hdev->link_keys) {
1029 struct link_key *key;
1031 key = list_entry(p, struct link_key, list);
1033 list_del(p);
1034 kfree(key);
1037 return 0;
1040 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1042 struct link_key *k;
1044 list_for_each_entry(k, &hdev->link_keys, list)
1045 if (bacmp(bdaddr, &k->bdaddr) == 0)
1046 return k;
1048 return NULL;
1051 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1052 u8 key_type, u8 old_key_type)
1054 /* Legacy key */
1055 if (key_type < 0x03)
1056 return 1;
1058 /* Debug keys are insecure so don't store them persistently */
1059 if (key_type == HCI_LK_DEBUG_COMBINATION)
1060 return 0;
1062 /* Changed combination key and there's no previous one */
1063 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1064 return 0;
1066 /* Security mode 3 case */
1067 if (!conn)
1068 return 1;
1070 /* Neither local nor remote side had no-bonding as requirement */
1071 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1072 return 1;
1074 /* Local side had dedicated bonding as requirement */
1075 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1076 return 1;
1078 /* Remote side had dedicated bonding as requirement */
1079 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1080 return 1;
1082 /* If none of the above criteria match, then don't store the key
1083 * persistently */
1084 return 0;
1087 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1089 struct link_key *k;
1091 list_for_each_entry(k, &hdev->link_keys, list) {
1092 struct key_master_id *id;
1094 if (k->type != HCI_LK_SMP_LTK)
1095 continue;
1097 if (k->dlen != sizeof(*id))
1098 continue;
1100 id = (void *) &k->data;
1101 if (id->ediv == ediv &&
1102 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1103 return k;
1106 return NULL;
1108 EXPORT_SYMBOL(hci_find_ltk);
1110 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1111 bdaddr_t *bdaddr, u8 type)
1113 struct link_key *k;
1115 list_for_each_entry(k, &hdev->link_keys, list)
1116 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1117 return k;
1119 return NULL;
1121 EXPORT_SYMBOL(hci_find_link_key_type);
1123 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1124 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1126 struct link_key *key, *old_key;
1127 u8 old_key_type, persistent;
1129 old_key = hci_find_link_key(hdev, bdaddr);
1130 if (old_key) {
1131 old_key_type = old_key->type;
1132 key = old_key;
1133 } else {
1134 old_key_type = conn ? conn->key_type : 0xff;
1135 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1136 if (!key)
1137 return -ENOMEM;
1138 list_add(&key->list, &hdev->link_keys);
1141 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1143 /* Some buggy controller combinations generate a changed
1144 * combination key for legacy pairing even when there's no
1145 * previous key */
1146 if (type == HCI_LK_CHANGED_COMBINATION &&
1147 (!conn || conn->remote_auth == 0xff) &&
1148 old_key_type == 0xff) {
1149 type = HCI_LK_COMBINATION;
1150 if (conn)
1151 conn->key_type = type;
1154 bacpy(&key->bdaddr, bdaddr);
1155 memcpy(key->val, val, 16);
1156 key->pin_len = pin_len;
1158 if (type == HCI_LK_CHANGED_COMBINATION)
1159 key->type = old_key_type;
1160 else
1161 key->type = type;
1163 if (!new_key)
1164 return 0;
1166 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1168 mgmt_new_link_key(hdev, key, persistent);
1170 if (!persistent) {
1171 list_del(&key->list);
1172 kfree(key);
1175 return 0;
1178 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1179 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1181 struct link_key *key, *old_key;
1182 struct key_master_id *id;
1183 u8 old_key_type;
1185 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1187 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1188 if (old_key) {
1189 key = old_key;
1190 old_key_type = old_key->type;
1191 } else {
1192 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1193 if (!key)
1194 return -ENOMEM;
1195 list_add(&key->list, &hdev->link_keys);
1196 old_key_type = 0xff;
1199 key->dlen = sizeof(*id);
1201 bacpy(&key->bdaddr, bdaddr);
1202 memcpy(key->val, ltk, sizeof(key->val));
1203 key->type = HCI_LK_SMP_LTK;
1204 key->pin_len = key_size;
1206 id = (void *) &key->data;
1207 id->ediv = ediv;
1208 memcpy(id->rand, rand, sizeof(id->rand));
1210 if (new_key)
1211 mgmt_new_link_key(hdev, key, old_key_type);
1213 return 0;
1216 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1218 struct link_key *key;
1220 key = hci_find_link_key(hdev, bdaddr);
1221 if (!key)
1222 return -ENOENT;
1224 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1226 list_del(&key->list);
1227 kfree(key);
1229 return 0;
1232 /* HCI command timer function */
1233 static void hci_cmd_timer(unsigned long arg)
1235 struct hci_dev *hdev = (void *) arg;
1237 BT_ERR("%s command tx timeout", hdev->name);
1238 atomic_set(&hdev->cmd_cnt, 1);
1239 queue_work(hdev->workqueue, &hdev->cmd_work);
1242 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1243 bdaddr_t *bdaddr)
1245 struct oob_data *data;
1247 list_for_each_entry(data, &hdev->remote_oob_data, list)
1248 if (bacmp(bdaddr, &data->bdaddr) == 0)
1249 return data;
1251 return NULL;
1254 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1256 struct oob_data *data;
1258 data = hci_find_remote_oob_data(hdev, bdaddr);
1259 if (!data)
1260 return -ENOENT;
1262 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1264 list_del(&data->list);
1265 kfree(data);
1267 return 0;
1270 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1272 struct oob_data *data, *n;
1274 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1275 list_del(&data->list);
1276 kfree(data);
1279 return 0;
1282 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1283 u8 *randomizer)
1285 struct oob_data *data;
1287 data = hci_find_remote_oob_data(hdev, bdaddr);
1289 if (!data) {
1290 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1291 if (!data)
1292 return -ENOMEM;
1294 bacpy(&data->bdaddr, bdaddr);
1295 list_add(&data->list, &hdev->remote_oob_data);
1298 memcpy(data->hash, hash, sizeof(data->hash));
1299 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1301 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1303 return 0;
1306 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1307 bdaddr_t *bdaddr)
1309 struct bdaddr_list *b;
1311 list_for_each_entry(b, &hdev->blacklist, list)
1312 if (bacmp(bdaddr, &b->bdaddr) == 0)
1313 return b;
1315 return NULL;
1318 int hci_blacklist_clear(struct hci_dev *hdev)
1320 struct list_head *p, *n;
1322 list_for_each_safe(p, n, &hdev->blacklist) {
1323 struct bdaddr_list *b;
1325 b = list_entry(p, struct bdaddr_list, list);
1327 list_del(p);
1328 kfree(b);
1331 return 0;
1334 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1336 struct bdaddr_list *entry;
1338 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1339 return -EBADF;
1341 if (hci_blacklist_lookup(hdev, bdaddr))
1342 return -EEXIST;
1344 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1345 if (!entry)
1346 return -ENOMEM;
1348 bacpy(&entry->bdaddr, bdaddr);
1350 list_add(&entry->list, &hdev->blacklist);
1352 return mgmt_device_blocked(hdev, bdaddr);
1355 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1357 struct bdaddr_list *entry;
1359 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1360 return hci_blacklist_clear(hdev);
1362 entry = hci_blacklist_lookup(hdev, bdaddr);
1363 if (!entry)
1364 return -ENOENT;
1366 list_del(&entry->list);
1367 kfree(entry);
1369 return mgmt_device_unblocked(hdev, bdaddr);
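/* Illustrative sketch (assumption): the management interface blocks and
 * unblocks remote devices through these helpers while holding the device
 * lock:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &bdaddr);
 *	hci_dev_unlock(hdev);
 */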
1372 static void hci_clear_adv_cache(struct work_struct *work)
1374 struct hci_dev *hdev = container_of(work, struct hci_dev,
1375 adv_work.work);
1377 hci_dev_lock(hdev);
1379 hci_adv_entries_clear(hdev);
1381 hci_dev_unlock(hdev);
1384 int hci_adv_entries_clear(struct hci_dev *hdev)
1386 struct adv_entry *entry, *tmp;
1388 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1389 list_del(&entry->list);
1390 kfree(entry);
1393 BT_DBG("%s adv cache cleared", hdev->name);
1395 return 0;
1398 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1400 struct adv_entry *entry;
1402 list_for_each_entry(entry, &hdev->adv_entries, list)
1403 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1404 return entry;
1406 return NULL;
1409 static inline int is_connectable_adv(u8 evt_type)
1411 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1412 return 1;
1414 return 0;
1417 int hci_add_adv_entry(struct hci_dev *hdev,
1418 struct hci_ev_le_advertising_info *ev)
1420 struct adv_entry *entry;
1422 if (!is_connectable_adv(ev->evt_type))
1423 return -EINVAL;
1425 /* Only new entries should be added to adv_entries. So, if
1426 * bdaddr was found, don't add it. */
1427 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1428 return 0;
1430 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1431 if (!entry)
1432 return -ENOMEM;
1434 bacpy(&entry->bdaddr, &ev->bdaddr);
1435 entry->bdaddr_type = ev->bdaddr_type;
1437 list_add(&entry->list, &hdev->adv_entries);
1439 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1440 batostr(&entry->bdaddr), entry->bdaddr_type);
1442 return 0;
1445 /* Register HCI device */
1446 int hci_register_dev(struct hci_dev *hdev)
1448 struct list_head *head = &hci_dev_list, *p;
1449 int i, id, error;
1451 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1452 hdev->bus, hdev->owner);
1454 if (!hdev->open || !hdev->close || !hdev->destruct)
1455 return -EINVAL;
1457 /* Do not allow HCI_AMP devices to register at index 0,
1458	 * so the index can be used as the AMP controller ID. */
1460 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1462 write_lock(&hci_dev_list_lock);
1464 /* Find first available device id */
1465 list_for_each(p, &hci_dev_list) {
1466 if (list_entry(p, struct hci_dev, list)->id != id)
1467 break;
1468 head = p; id++;
1471 sprintf(hdev->name, "hci%d", id);
1472 hdev->id = id;
1473 list_add_tail(&hdev->list, head);
1475 atomic_set(&hdev->refcnt, 1);
1476 mutex_init(&hdev->lock);
1478 hdev->flags = 0;
1479 hdev->dev_flags = 0;
1480 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1481 hdev->esco_type = (ESCO_HV1);
1482 hdev->link_mode = (HCI_LM_ACCEPT);
1483 hdev->io_capability = 0x03; /* No Input No Output */
1485 hdev->idle_timeout = 0;
1486 hdev->sniff_max_interval = 800;
1487 hdev->sniff_min_interval = 80;
1489 INIT_WORK(&hdev->rx_work, hci_rx_work);
1490 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1491 INIT_WORK(&hdev->tx_work, hci_tx_work);
1494 skb_queue_head_init(&hdev->rx_q);
1495 skb_queue_head_init(&hdev->cmd_q);
1496 skb_queue_head_init(&hdev->raw_q);
1498 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1500 for (i = 0; i < NUM_REASSEMBLY; i++)
1501 hdev->reassembly[i] = NULL;
1503 init_waitqueue_head(&hdev->req_wait_q);
1504 mutex_init(&hdev->req_lock);
1506 inquiry_cache_init(hdev);
1508 hci_conn_hash_init(hdev);
1510 INIT_LIST_HEAD(&hdev->mgmt_pending);
1512 INIT_LIST_HEAD(&hdev->blacklist);
1514 INIT_LIST_HEAD(&hdev->uuids);
1516 INIT_LIST_HEAD(&hdev->link_keys);
1518 INIT_LIST_HEAD(&hdev->remote_oob_data);
1520 INIT_LIST_HEAD(&hdev->adv_entries);
1522 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1523 INIT_WORK(&hdev->power_on, hci_power_on);
1524 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1526 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1528 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1530 atomic_set(&hdev->promisc, 0);
1532 write_unlock(&hci_dev_list_lock);
1534 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1535 WQ_MEM_RECLAIM, 1);
1536 if (!hdev->workqueue) {
1537 error = -ENOMEM;
1538 goto err;
1541 error = hci_add_sysfs(hdev);
1542 if (error < 0)
1543 goto err_wqueue;
1545 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1546 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1547 if (hdev->rfkill) {
1548 if (rfkill_register(hdev->rfkill) < 0) {
1549 rfkill_destroy(hdev->rfkill);
1550 hdev->rfkill = NULL;
1554 set_bit(HCI_AUTO_OFF, &hdev->flags);
1555 set_bit(HCI_SETUP, &hdev->flags);
1556 schedule_work(&hdev->power_on);
1558 hci_notify(hdev, HCI_DEV_REG);
1560 return id;
1562 err_wqueue:
1563 destroy_workqueue(hdev->workqueue);
1564 err:
1565 write_lock(&hci_dev_list_lock);
1566 list_del(&hdev->list);
1567 write_unlock(&hci_dev_list_lock);
1569 return error;
1571 EXPORT_SYMBOL(hci_register_dev);
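/* Illustrative sketch (assumption): a minimal transport driver registers
 * roughly as below; the my_* callbacks and the HCI_USB bus type are examples,
 * everything else is the API used above:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */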
1573 /* Unregister HCI device */
1574 void hci_unregister_dev(struct hci_dev *hdev)
1576 int i;
1578 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1580 write_lock(&hci_dev_list_lock);
1581 list_del(&hdev->list);
1582 write_unlock(&hci_dev_list_lock);
1584 hci_dev_do_close(hdev);
1586 for (i = 0; i < NUM_REASSEMBLY; i++)
1587 kfree_skb(hdev->reassembly[i]);
1589 if (!test_bit(HCI_INIT, &hdev->flags) &&
1590 !test_bit(HCI_SETUP, &hdev->flags)) {
1591 hci_dev_lock(hdev);
1592 mgmt_index_removed(hdev);
1593 hci_dev_unlock(hdev);
1596 /* mgmt_index_removed should take care of emptying the
1597 * pending list */
1598 BUG_ON(!list_empty(&hdev->mgmt_pending));
1600 hci_notify(hdev, HCI_DEV_UNREG);
1602 if (hdev->rfkill) {
1603 rfkill_unregister(hdev->rfkill);
1604 rfkill_destroy(hdev->rfkill);
1607 hci_del_sysfs(hdev);
1609 cancel_delayed_work_sync(&hdev->adv_work);
1611 destroy_workqueue(hdev->workqueue);
1613 hci_dev_lock(hdev);
1614 hci_blacklist_clear(hdev);
1615 hci_uuids_clear(hdev);
1616 hci_link_keys_clear(hdev);
1617 hci_remote_oob_data_clear(hdev);
1618 hci_adv_entries_clear(hdev);
1619 hci_dev_unlock(hdev);
1621 __hci_dev_put(hdev);
1623 EXPORT_SYMBOL(hci_unregister_dev);
1625 /* Suspend HCI device */
1626 int hci_suspend_dev(struct hci_dev *hdev)
1628 hci_notify(hdev, HCI_DEV_SUSPEND);
1629 return 0;
1631 EXPORT_SYMBOL(hci_suspend_dev);
1633 /* Resume HCI device */
1634 int hci_resume_dev(struct hci_dev *hdev)
1636 hci_notify(hdev, HCI_DEV_RESUME);
1637 return 0;
1639 EXPORT_SYMBOL(hci_resume_dev);
1641 /* Receive frame from HCI drivers */
1642 int hci_recv_frame(struct sk_buff *skb)
1644 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1645 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1646 && !test_bit(HCI_INIT, &hdev->flags))) {
1647 kfree_skb(skb);
1648 return -ENXIO;
1651	/* Incoming skb */
1652 bt_cb(skb)->incoming = 1;
1654 /* Time stamp */
1655 __net_timestamp(skb);
1657 skb_queue_tail(&hdev->rx_q, skb);
1658 queue_work(hdev->workqueue, &hdev->rx_work);
1660 return 0;
1662 EXPORT_SYMBOL(hci_recv_frame);
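/* Illustrative sketch (assumption): a driver that already holds a complete
 * HCI packet in an skb hands it to the core like this; the packet type is
 * just an example:
 *
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(skb);
 */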
1664 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1665 int count, __u8 index)
1667 int len = 0;
1668 int hlen = 0;
1669 int remain = count;
1670 struct sk_buff *skb;
1671 struct bt_skb_cb *scb;
1673 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1674 index >= NUM_REASSEMBLY)
1675 return -EILSEQ;
1677 skb = hdev->reassembly[index];
1679 if (!skb) {
1680 switch (type) {
1681 case HCI_ACLDATA_PKT:
1682 len = HCI_MAX_FRAME_SIZE;
1683 hlen = HCI_ACL_HDR_SIZE;
1684 break;
1685 case HCI_EVENT_PKT:
1686 len = HCI_MAX_EVENT_SIZE;
1687 hlen = HCI_EVENT_HDR_SIZE;
1688 break;
1689 case HCI_SCODATA_PKT:
1690 len = HCI_MAX_SCO_SIZE;
1691 hlen = HCI_SCO_HDR_SIZE;
1692 break;
1695 skb = bt_skb_alloc(len, GFP_ATOMIC);
1696 if (!skb)
1697 return -ENOMEM;
1699 scb = (void *) skb->cb;
1700 scb->expect = hlen;
1701 scb->pkt_type = type;
1703 skb->dev = (void *) hdev;
1704 hdev->reassembly[index] = skb;
1707 while (count) {
1708 scb = (void *) skb->cb;
1709 len = min(scb->expect, (__u16)count);
1711 memcpy(skb_put(skb, len), data, len);
1713 count -= len;
1714 data += len;
1715 scb->expect -= len;
1716 remain = count;
1718 switch (type) {
1719 case HCI_EVENT_PKT:
1720 if (skb->len == HCI_EVENT_HDR_SIZE) {
1721 struct hci_event_hdr *h = hci_event_hdr(skb);
1722 scb->expect = h->plen;
1724 if (skb_tailroom(skb) < scb->expect) {
1725 kfree_skb(skb);
1726 hdev->reassembly[index] = NULL;
1727 return -ENOMEM;
1730 break;
1732 case HCI_ACLDATA_PKT:
1733 if (skb->len == HCI_ACL_HDR_SIZE) {
1734 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1735 scb->expect = __le16_to_cpu(h->dlen);
1737 if (skb_tailroom(skb) < scb->expect) {
1738 kfree_skb(skb);
1739 hdev->reassembly[index] = NULL;
1740 return -ENOMEM;
1743 break;
1745 case HCI_SCODATA_PKT:
1746 if (skb->len == HCI_SCO_HDR_SIZE) {
1747 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1748 scb->expect = h->dlen;
1750 if (skb_tailroom(skb) < scb->expect) {
1751 kfree_skb(skb);
1752 hdev->reassembly[index] = NULL;
1753 return -ENOMEM;
1756 break;
1759 if (scb->expect == 0) {
1760 /* Complete frame */
1762 bt_cb(skb)->pkt_type = type;
1763 hci_recv_frame(skb);
1765 hdev->reassembly[index] = NULL;
1766 return remain;
1770 return remain;
1773 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1775 int rem = 0;
1777 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1778 return -EILSEQ;
1780 while (count) {
1781 rem = hci_reassembly(hdev, type, data, count, type - 1);
1782 if (rem < 0)
1783 return rem;
1785 data += (count - rem);
1786 count = rem;
1789 return rem;
1791 EXPORT_SYMBOL(hci_recv_fragment);
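/* Illustrative sketch (assumption): a driver that receives raw bytes (for
 * example over a serial transport) and already knows the packet type can
 * feed partial data to the reassembler:
 *
 *	rem = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *	if (rem < 0)
 *		BT_ERR("%s frame reassembly failed", hdev->name);
 *
 * Drivers whose byte stream carries the packet type indicator itself use
 * hci_recv_stream_fragment() below instead.
 */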
1793 #define STREAM_REASSEMBLY 0
1795 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1797 int type;
1798 int rem = 0;
1800 while (count) {
1801 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1803 if (!skb) {
1804 struct { char type; } *pkt;
1806 /* Start of the frame */
1807 pkt = data;
1808 type = pkt->type;
1810 data++;
1811 count--;
1812 } else
1813 type = bt_cb(skb)->pkt_type;
1815 rem = hci_reassembly(hdev, type, data, count,
1816 STREAM_REASSEMBLY);
1817 if (rem < 0)
1818 return rem;
1820 data += (count - rem);
1821 count = rem;
1824 return rem;
1826 EXPORT_SYMBOL(hci_recv_stream_fragment);
1828 /* ---- Interface to upper protocols ---- */
1830 int hci_register_cb(struct hci_cb *cb)
1832 BT_DBG("%p name %s", cb, cb->name);
1834 write_lock(&hci_cb_list_lock);
1835 list_add(&cb->list, &hci_cb_list);
1836 write_unlock(&hci_cb_list_lock);
1838 return 0;
1840 EXPORT_SYMBOL(hci_register_cb);
1842 int hci_unregister_cb(struct hci_cb *cb)
1844 BT_DBG("%p name %s", cb, cb->name);
1846 write_lock(&hci_cb_list_lock);
1847 list_del(&cb->list);
1848 write_unlock(&hci_cb_list_lock);
1850 return 0;
1852 EXPORT_SYMBOL(hci_unregister_cb);
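/* Illustrative sketch (assumption): an upper-layer protocol announces itself
 * with a struct hci_cb so it receives the connection callbacks; only .name
 * is shown here, the remaining fields depend on the hci_cb definition in
 * hci_core.h:
 *
 *	static struct hci_cb my_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */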
1854 static int hci_send_frame(struct sk_buff *skb)
1856 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1858 if (!hdev) {
1859 kfree_skb(skb);
1860 return -ENODEV;
1863 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1865 if (atomic_read(&hdev->promisc)) {
1866 /* Time stamp */
1867 __net_timestamp(skb);
1869 hci_send_to_sock(hdev, skb, NULL);
1872 /* Get rid of skb owner, prior to sending to the driver. */
1873 skb_orphan(skb);
1875 return hdev->send(skb);
1878 /* Send HCI command */
1879 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1881 int len = HCI_COMMAND_HDR_SIZE + plen;
1882 struct hci_command_hdr *hdr;
1883 struct sk_buff *skb;
1885 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1887 skb = bt_skb_alloc(len, GFP_ATOMIC);
1888 if (!skb) {
1889 BT_ERR("%s no memory for command", hdev->name);
1890 return -ENOMEM;
1893 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1894 hdr->opcode = cpu_to_le16(opcode);
1895 hdr->plen = plen;
1897 if (plen)
1898 memcpy(skb_put(skb, plen), param, plen);
1900 BT_DBG("skb len %d", skb->len);
1902 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1903 skb->dev = (void *) hdev;
1905 if (test_bit(HCI_INIT, &hdev->flags))
1906 hdev->init_last_cmd = opcode;
1908 skb_queue_tail(&hdev->cmd_q, skb);
1909 queue_work(hdev->workqueue, &hdev->cmd_work);
1911 return 0;
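/* Illustrative sketch (assumption): queueing a single HCI command, as the
 * init and scan request helpers above do:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * The command is only queued here; hci_cmd_work() below pulls it off
 * hdev->cmd_q and passes it to the driver through hci_send_frame().
 */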
1914 /* Get data from the previously sent command */
1915 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1917 struct hci_command_hdr *hdr;
1919 if (!hdev->sent_cmd)
1920 return NULL;
1922 hdr = (void *) hdev->sent_cmd->data;
1924 if (hdr->opcode != cpu_to_le16(opcode))
1925 return NULL;
1927 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1929 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1932 /* Send ACL data */
1933 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1935 struct hci_acl_hdr *hdr;
1936 int len = skb->len;
1938 skb_push(skb, HCI_ACL_HDR_SIZE);
1939 skb_reset_transport_header(skb);
1940 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1941 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1942 hdr->dlen = cpu_to_le16(len);
1945 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1946 struct sk_buff *skb, __u16 flags)
1948 struct hci_dev *hdev = conn->hdev;
1949 struct sk_buff *list;
1951 list = skb_shinfo(skb)->frag_list;
1952 if (!list) {
1953 /* Non fragmented */
1954 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1956 skb_queue_tail(queue, skb);
1957 } else {
1958 /* Fragmented */
1959 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1961 skb_shinfo(skb)->frag_list = NULL;
1963 /* Queue all fragments atomically */
1964 spin_lock(&queue->lock);
1966 __skb_queue_tail(queue, skb);
1968 flags &= ~ACL_START;
1969 flags |= ACL_CONT;
1970 do {
1971 skb = list; list = list->next;
1973 skb->dev = (void *) hdev;
1974 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1975 hci_add_acl_hdr(skb, conn->handle, flags);
1977 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1979 __skb_queue_tail(queue, skb);
1980 } while (list);
1982 spin_unlock(&queue->lock);
1986 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
1988 struct hci_conn *conn = chan->conn;
1989 struct hci_dev *hdev = conn->hdev;
1991 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
1993 skb->dev = (void *) hdev;
1994 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1995 hci_add_acl_hdr(skb, conn->handle, flags);
1997 hci_queue_acl(conn, &chan->data_q, skb, flags);
1999 queue_work(hdev->workqueue, &hdev->tx_work);
2001 EXPORT_SYMBOL(hci_send_acl);
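/* Illustrative sketch (assumption): L2CAP is the in-tree caller of
 * hci_send_acl(); the first fragment is tagged ACL_START and the helper
 * above re-tags any frag_list members with ACL_CONT:
 *
 *	hci_send_acl(chan, skb, ACL_START);
 */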
2003 /* Send SCO data */
2004 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2006 struct hci_dev *hdev = conn->hdev;
2007 struct hci_sco_hdr hdr;
2009 BT_DBG("%s len %d", hdev->name, skb->len);
2011 hdr.handle = cpu_to_le16(conn->handle);
2012 hdr.dlen = skb->len;
2014 skb_push(skb, HCI_SCO_HDR_SIZE);
2015 skb_reset_transport_header(skb);
2016 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2018 skb->dev = (void *) hdev;
2019 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2021 skb_queue_tail(&conn->data_q, skb);
2022 queue_work(hdev->workqueue, &hdev->tx_work);
2024 EXPORT_SYMBOL(hci_send_sco);
2026 /* ---- HCI TX task (outgoing data) ---- */
2028 /* HCI Connection scheduler */
2029 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2031 struct hci_conn_hash *h = &hdev->conn_hash;
2032 struct hci_conn *conn = NULL, *c;
2033 int num = 0, min = ~0;
2035 /* We don't have to lock device here. Connections are always
2036 * added and removed with TX task disabled. */
2038 rcu_read_lock();
2040 list_for_each_entry_rcu(c, &h->list, list) {
2041 if (c->type != type || skb_queue_empty(&c->data_q))
2042 continue;
2044 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2045 continue;
2047 num++;
2049 if (c->sent < min) {
2050 min = c->sent;
2051 conn = c;
2054 if (hci_conn_num(hdev, type) == num)
2055 break;
2058 rcu_read_unlock();
2060 if (conn) {
2061 int cnt, q;
2063 switch (conn->type) {
2064 case ACL_LINK:
2065 cnt = hdev->acl_cnt;
2066 break;
2067 case SCO_LINK:
2068 case ESCO_LINK:
2069 cnt = hdev->sco_cnt;
2070 break;
2071 case LE_LINK:
2072 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2073 break;
2074 default:
2075 cnt = 0;
2076 BT_ERR("Unknown link type");
2079 q = cnt / num;
2080 *quote = q ? q : 1;
2081 } else
2082 *quote = 0;
2084 BT_DBG("conn %p quote %d", conn, *quote);
2085 return conn;
2088 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2090 struct hci_conn_hash *h = &hdev->conn_hash;
2091 struct hci_conn *c;
2093 BT_ERR("%s link tx timeout", hdev->name);
2095 rcu_read_lock();
2097 /* Kill stalled connections */
2098 list_for_each_entry_rcu(c, &h->list, list) {
2099 if (c->type == type && c->sent) {
2100 BT_ERR("%s killing stalled connection %s",
2101 hdev->name, batostr(&c->dst));
2102 hci_acl_disconn(c, 0x13);
2106 rcu_read_unlock();
2109 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2110 int *quote)
2112 struct hci_conn_hash *h = &hdev->conn_hash;
2113 struct hci_chan *chan = NULL;
2114 int num = 0, min = ~0, cur_prio = 0;
2115 struct hci_conn *conn;
2116 int cnt, q, conn_num = 0;
2118 BT_DBG("%s", hdev->name);
2120 rcu_read_lock();
2122 list_for_each_entry_rcu(conn, &h->list, list) {
2123 struct hci_chan *tmp;
2125 if (conn->type != type)
2126 continue;
2128 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2129 continue;
2131 conn_num++;
2133 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2134 struct sk_buff *skb;
2136 if (skb_queue_empty(&tmp->data_q))
2137 continue;
2139 skb = skb_peek(&tmp->data_q);
2140 if (skb->priority < cur_prio)
2141 continue;
2143 if (skb->priority > cur_prio) {
2144 num = 0;
2145 min = ~0;
2146 cur_prio = skb->priority;
2149 num++;
2151 if (conn->sent < min) {
2152 min = conn->sent;
2153 chan = tmp;
2157 if (hci_conn_num(hdev, type) == conn_num)
2158 break;
2161 rcu_read_unlock();
2163 if (!chan)
2164 return NULL;
2166 switch (chan->conn->type) {
2167 case ACL_LINK:
2168 cnt = hdev->acl_cnt;
2169 break;
2170 case SCO_LINK:
2171 case ESCO_LINK:
2172 cnt = hdev->sco_cnt;
2173 break;
2174 case LE_LINK:
2175 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2176 break;
2177 default:
2178 cnt = 0;
2179 BT_ERR("Unknown link type");
2182 q = cnt / num;
2183 *quote = q ? q : 1;
2184 BT_DBG("chan %p quote %d", chan, *quote);
2185 return chan;
2188 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2190 struct hci_conn_hash *h = &hdev->conn_hash;
2191 struct hci_conn *conn;
2192 int num = 0;
2194 BT_DBG("%s", hdev->name);
2196 rcu_read_lock();
2198 list_for_each_entry_rcu(conn, &h->list, list) {
2199 struct hci_chan *chan;
2201 if (conn->type != type)
2202 continue;
2204 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2205 continue;
2207 num++;
2209 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2210 struct sk_buff *skb;
2212 if (chan->sent) {
2213 chan->sent = 0;
2214 continue;
2217 if (skb_queue_empty(&chan->data_q))
2218 continue;
2220 skb = skb_peek(&chan->data_q);
2221 if (skb->priority >= HCI_PRIO_MAX - 1)
2222 continue;
2224 skb->priority = HCI_PRIO_MAX - 1;
2226 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2227 skb->priority);
2230 if (hci_conn_num(hdev, type) == num)
2231 break;
2234 rcu_read_unlock();
2238 static inline void hci_sched_acl(struct hci_dev *hdev)
2240 struct hci_chan *chan;
2241 struct sk_buff *skb;
2242 int quote;
2243 unsigned int cnt;
2245 BT_DBG("%s", hdev->name);
2247 if (!hci_conn_num(hdev, ACL_LINK))
2248 return;
2250 if (!test_bit(HCI_RAW, &hdev->flags)) {
2251 /* ACL tx timeout must be longer than maximum
2252 * link supervision timeout (40.9 seconds) */
2253 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2254 hci_link_tx_to(hdev, ACL_LINK);
2257 cnt = hdev->acl_cnt;
2259 while (hdev->acl_cnt &&
2260 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2261 u32 priority = (skb_peek(&chan->data_q))->priority;
2262 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2263 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2264 skb->len, skb->priority);
2266 /* Stop if priority has changed */
2267 if (skb->priority < priority)
2268 break;
2270 skb = skb_dequeue(&chan->data_q);
2272 hci_conn_enter_active_mode(chan->conn,
2273 bt_cb(skb)->force_active);
2275 hci_send_frame(skb);
2276 hdev->acl_last_tx = jiffies;
2278 hdev->acl_cnt--;
2279 chan->sent++;
2280 chan->conn->sent++;
2284 if (cnt != hdev->acl_cnt)
2285 hci_prio_recalculate(hdev, ACL_LINK);
2288 /* Schedule SCO */
2289 static inline void hci_sched_sco(struct hci_dev *hdev)
2291 struct hci_conn *conn;
2292 struct sk_buff *skb;
2293 int quote;
2295 BT_DBG("%s", hdev->name);
2297 if (!hci_conn_num(hdev, SCO_LINK))
2298 return;
2300 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2301 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2302 BT_DBG("skb %p len %d", skb, skb->len);
2303 hci_send_frame(skb);
2305 conn->sent++;
2306 if (conn->sent == ~0)
2307 conn->sent = 0;
2312 static inline void hci_sched_esco(struct hci_dev *hdev)
2314 struct hci_conn *conn;
2315 struct sk_buff *skb;
2316 int quote;
2318 BT_DBG("%s", hdev->name);
2320 if (!hci_conn_num(hdev, ESCO_LINK))
2321 return;
2323 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2324 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2325 BT_DBG("skb %p len %d", skb, skb->len);
2326 hci_send_frame(skb);
2328 conn->sent++;
2329 if (conn->sent == ~0)
2330 conn->sent = 0;
2335 static inline void hci_sched_le(struct hci_dev *hdev)
2337 struct hci_chan *chan;
2338 struct sk_buff *skb;
2339 int quote, cnt, tmp;
2341 BT_DBG("%s", hdev->name);
2343 if (!hci_conn_num(hdev, LE_LINK))
2344 return;
2346 if (!test_bit(HCI_RAW, &hdev->flags)) {
2347 /* LE tx timeout must be longer than maximum
2348 * link supervision timeout (40.9 seconds) */
2349 if (!hdev->le_cnt && hdev->le_pkts &&
2350 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2351 hci_link_tx_to(hdev, LE_LINK);
2354 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2355 tmp = cnt;
2356 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2357 u32 priority = (skb_peek(&chan->data_q))->priority;
2358 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2359 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2360 skb->len, skb->priority);
2362 /* Stop if priority has changed */
2363 if (skb->priority < priority)
2364 break;
2366 skb = skb_dequeue(&chan->data_q);
2368 hci_send_frame(skb);
2369 hdev->le_last_tx = jiffies;
2371 cnt--;
2372 chan->sent++;
2373 chan->conn->sent++;
2377 if (hdev->le_pkts)
2378 hdev->le_cnt = cnt;
2379 else
2380 hdev->acl_cnt = cnt;
2382 if (cnt != tmp)
2383 hci_prio_recalculate(hdev, LE_LINK);
2386 static void hci_tx_work(struct work_struct *work)
2388 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2389 struct sk_buff *skb;
2391 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2392 hdev->sco_cnt, hdev->le_cnt);
2394 /* Schedule queues and send stuff to HCI driver */
2396 hci_sched_acl(hdev);
2398 hci_sched_sco(hdev);
2400 hci_sched_esco(hdev);
2402 hci_sched_le(hdev);
2404 /* Send next queued raw (unknown type) packet */
2405 while ((skb = skb_dequeue(&hdev->raw_q)))
2406 hci_send_frame(skb);
2409 /* ----- HCI RX task (incoming data processing) ----- */
2411 /* ACL data packet */
2412 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2414 struct hci_acl_hdr *hdr = (void *) skb->data;
2415 struct hci_conn *conn;
2416 __u16 handle, flags;
2418 skb_pull(skb, HCI_ACL_HDR_SIZE);
2420 handle = __le16_to_cpu(hdr->handle);
2421 flags = hci_flags(handle);
2422 handle = hci_handle(handle);
2424 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2426 hdev->stat.acl_rx++;
2428 hci_dev_lock(hdev);
2429 conn = hci_conn_hash_lookup_handle(hdev, handle);
2430 hci_dev_unlock(hdev);
2432 if (conn) {
2433 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2435 /* Send to upper protocol */
2436 l2cap_recv_acldata(conn, skb, flags);
2437 return;
2438 } else {
2439 BT_ERR("%s ACL packet for unknown connection handle %d",
2440 hdev->name, handle);
2443 kfree_skb(skb);
2446 /* SCO data packet */
2447 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2449 struct hci_sco_hdr *hdr = (void *) skb->data;
2450 struct hci_conn *conn;
2451 __u16 handle;
2453 skb_pull(skb, HCI_SCO_HDR_SIZE);
2455 handle = __le16_to_cpu(hdr->handle);
2457 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2459 hdev->stat.sco_rx++;
2461 hci_dev_lock(hdev);
2462 conn = hci_conn_hash_lookup_handle(hdev, handle);
2463 hci_dev_unlock(hdev);
2465 if (conn) {
2466 /* Send to upper protocol */
2467 sco_recv_scodata(conn, skb);
2468 return;
2469 } else {
2470 BT_ERR("%s SCO packet for unknown connection handle %d",
2471 hdev->name, handle);
2474 kfree_skb(skb);
2477 static void hci_rx_work(struct work_struct *work)
2479 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2480 struct sk_buff *skb;
2482 BT_DBG("%s", hdev->name);
2484 while ((skb = skb_dequeue(&hdev->rx_q))) {
2485 if (atomic_read(&hdev->promisc)) {
2486 /* Send copy to the sockets */
2487 hci_send_to_sock(hdev, skb, NULL);
2490 if (test_bit(HCI_RAW, &hdev->flags)) {
2491 kfree_skb(skb);
2492 continue;
2495 if (test_bit(HCI_INIT, &hdev->flags)) {
2496			/* Don't process data packets in these states. */
2497 switch (bt_cb(skb)->pkt_type) {
2498 case HCI_ACLDATA_PKT:
2499 case HCI_SCODATA_PKT:
2500 kfree_skb(skb);
2501 continue;
2505 /* Process frame */
2506 switch (bt_cb(skb)->pkt_type) {
2507 case HCI_EVENT_PKT:
2508 BT_DBG("%s Event packet", hdev->name);
2509 hci_event_packet(hdev, skb);
2510 break;
2512 case HCI_ACLDATA_PKT:
2513 BT_DBG("%s ACL data packet", hdev->name);
2514 hci_acldata_packet(hdev, skb);
2515 break;
2517 case HCI_SCODATA_PKT:
2518 BT_DBG("%s SCO data packet", hdev->name);
2519 hci_scodata_packet(hdev, skb);
2520 break;
2522 default:
2523 kfree_skb(skb);
2524 break;
2529 static void hci_cmd_work(struct work_struct *work)
2531 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2532 struct sk_buff *skb;
2534 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2536 /* Send queued commands */
2537 if (atomic_read(&hdev->cmd_cnt)) {
2538 skb = skb_dequeue(&hdev->cmd_q);
2539 if (!skb)
2540 return;
2542 kfree_skb(hdev->sent_cmd);
2544 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2545 if (hdev->sent_cmd) {
2546 atomic_dec(&hdev->cmd_cnt);
2547 hci_send_frame(skb);
2548 if (test_bit(HCI_RESET, &hdev->flags))
2549 del_timer(&hdev->cmd_timer);
2550 else
2551 mod_timer(&hdev->cmd_timer,
2552 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2553 } else {
2554 skb_queue_head(&hdev->cmd_q, skb);
2555 queue_work(hdev->workqueue, &hdev->cmd_work);
2560 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2562 /* General inquiry access code (GIAC) */
2563 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2564 struct hci_cp_inquiry cp;
2566 BT_DBG("%s", hdev->name);
2568 if (test_bit(HCI_INQUIRY, &hdev->flags))
2569 return -EINPROGRESS;
2571 memset(&cp, 0, sizeof(cp));
2572 memcpy(&cp.lap, lap, sizeof(cp.lap));
2573 cp.length = length;
2575 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2578 int hci_cancel_inquiry(struct hci_dev *hdev)
2580 BT_DBG("%s", hdev->name);
2582 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2583 return -EPERM;
2585 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
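/* Illustrative sketch (assumption): the management code drives discovery
 * through these helpers; the length value is only an example (inquiry
 * length is expressed in units of 1.28 s):
 *
 *	err = hci_do_inquiry(hdev, 0x08);
 *	...
 *	err = hci_cancel_inquiry(hdev);
 */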
2588 module_param(enable_hs, bool, 0644);
2589 MODULE_PARM_DESC(enable_hs, "Enable High Speed");