net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
53 static void hci_notify(struct hci_dev *hdev, int event)
55 hci_sock_dev_event(hdev, event);
58 /* ---- HCI requests ---- */
60 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
64 /* If this is the init phase, check if the completed command matches
65 * the last init command, and if not just return. */
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
69 u16 opcode = __le16_to_cpu(sent->opcode);
70 struct sk_buff *skb;
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command. */
79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
80 return;
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
88 return;
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
98 static void hci_req_cancel(struct hci_dev *hdev, int err)
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
109 /* Execute request and wait for completion. */
110 static int __hci_request(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
117 BT_DBG("%s start", hdev->name);
119 hdev->req_status = HCI_REQ_PEND;
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
124 req(hdev, opt);
125 schedule_timeout(timeout);
127 remove_wait_queue(&hdev->req_wait_q, &wait);
129 if (signal_pending(current))
130 return -EINTR;
132 switch (hdev->req_status) {
133 case HCI_REQ_DONE:
134 err = -bt_to_errno(hdev->req_result);
135 break;
137 case HCI_REQ_CANCELED:
138 err = -hdev->req_result;
139 break;
141 default:
142 err = -ETIMEDOUT;
143 break;
146 hdev->req_status = hdev->req_result = 0;
148 BT_DBG("%s end: err %d", hdev->name, err);
150 return err;
153 static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
157 int ret;
159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
167 return ret;
170 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
172 BT_DBG("%s %ld", hdev->name, opt);
174 /* Reset device */
175 set_bit(HCI_RESET, &hdev->flags);
176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
179 static void bredr_init(struct hci_dev *hdev)
181 struct hci_cp_delete_stored_link_key cp;
182 __le16 param;
183 __u8 flt_type;
185 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
187 /* Mandatory initialization */
189 /* Read Local Supported Features */
190 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
192 /* Read Local Version */
193 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
195 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
196 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
198 /* Read BD Address */
199 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
201 /* Read Class of Device */
202 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
204 /* Read Local Name */
205 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
207 /* Read Voice Setting */
208 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
210 /* Optional initialization */
212 /* Clear Event Filters */
213 flt_type = HCI_FLT_CLEAR_ALL;
214 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
216 /* Connection accept timeout ~20 secs */
217 param = __constant_cpu_to_le16(0x7d00);
218 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
220 bacpy(&cp.bdaddr, BDADDR_ANY);
221 cp.delete_all = 1;
222 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
225 static void amp_init(struct hci_dev *hdev)
227 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
229 /* Read Local Version */
230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
232 /* Read Local AMP Info */
233 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
235 /* Read Data Blk size */
236 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
239 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
241 struct sk_buff *skb;
243 BT_DBG("%s %ld", hdev->name, opt);
245 /* Driver initialization */
247 /* Special commands */
248 while ((skb = skb_dequeue(&hdev->driver_init))) {
249 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
250 skb->dev = (void *) hdev;
252 skb_queue_tail(&hdev->cmd_q, skb);
253 queue_work(hdev->workqueue, &hdev->cmd_work);
255 skb_queue_purge(&hdev->driver_init);
257 /* Reset */
258 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
259 hci_reset_req(hdev, 0);
261 switch (hdev->dev_type) {
262 case HCI_BREDR:
263 bredr_init(hdev);
264 break;
266 case HCI_AMP:
267 amp_init(hdev);
268 break;
270 default:
271 BT_ERR("Unknown device type %d", hdev->dev_type);
272 break;
276 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
278 BT_DBG("%s", hdev->name);
280 /* Read LE buffer size */
281 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
284 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
286 __u8 scan = opt;
288 BT_DBG("%s %x", hdev->name, scan);
290 /* Inquiry and Page scans */
291 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
294 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
296 __u8 auth = opt;
298 BT_DBG("%s %x", hdev->name, auth);
300 /* Authentication */
301 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
304 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
306 __u8 encrypt = opt;
308 BT_DBG("%s %x", hdev->name, encrypt);
310 /* Encryption */
311 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
314 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
316 __le16 policy = cpu_to_le16(opt);
318 BT_DBG("%s %x", hdev->name, policy);
320 /* Default link policy */
321 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
324 /* Get HCI device by index.
325 * Device is held on return. */
326 struct hci_dev *hci_dev_get(int index)
328 struct hci_dev *hdev = NULL, *d;
330 BT_DBG("%d", index);
332 if (index < 0)
333 return NULL;
335 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (d->id == index) {
338 hdev = hci_dev_hold(d);
339 break;
342 read_unlock(&hci_dev_list_lock);
343 return hdev;
346 /* ---- Inquiry support ---- */
348 bool hci_discovery_active(struct hci_dev *hdev)
350 struct discovery_state *discov = &hdev->discovery;
352 switch (discov->state) {
353 case DISCOVERY_FINDING:
354 case DISCOVERY_RESOLVING:
355 return true;
357 default:
358 return false;
362 void hci_discovery_set_state(struct hci_dev *hdev, int state)
364 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
366 if (hdev->discovery.state == state)
367 return;
369 switch (state) {
370 case DISCOVERY_STOPPED:
371 if (hdev->discovery.state != DISCOVERY_STARTING)
372 mgmt_discovering(hdev, 0);
373 break;
374 case DISCOVERY_STARTING:
375 break;
376 case DISCOVERY_FINDING:
377 mgmt_discovering(hdev, 1);
378 break;
379 case DISCOVERY_RESOLVING:
380 break;
381 case DISCOVERY_STOPPING:
382 break;
385 hdev->discovery.state = state;
388 static void inquiry_cache_flush(struct hci_dev *hdev)
390 struct discovery_state *cache = &hdev->discovery;
391 struct inquiry_entry *p, *n;
393 list_for_each_entry_safe(p, n, &cache->all, all) {
394 list_del(&p->all);
395 kfree(p);
398 INIT_LIST_HEAD(&cache->unknown);
399 INIT_LIST_HEAD(&cache->resolve);
402 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
403 bdaddr_t *bdaddr)
405 struct discovery_state *cache = &hdev->discovery;
406 struct inquiry_entry *e;
408 BT_DBG("cache %p, %pMR", cache, bdaddr);
410 list_for_each_entry(e, &cache->all, all) {
411 if (!bacmp(&e->data.bdaddr, bdaddr))
412 return e;
415 return NULL;
418 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
419 bdaddr_t *bdaddr)
421 struct discovery_state *cache = &hdev->discovery;
422 struct inquiry_entry *e;
424 BT_DBG("cache %p, %pMR", cache, bdaddr);
426 list_for_each_entry(e, &cache->unknown, list) {
427 if (!bacmp(&e->data.bdaddr, bdaddr))
428 return e;
431 return NULL;
434 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
435 bdaddr_t *bdaddr,
436 int state)
438 struct discovery_state *cache = &hdev->discovery;
439 struct inquiry_entry *e;
441 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
443 list_for_each_entry(e, &cache->resolve, list) {
444 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
445 return e;
446 if (!bacmp(&e->data.bdaddr, bdaddr))
447 return e;
450 return NULL;
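/* Re-insert the entry into the name-resolve list ordered by RSSI, so the
 * device with the strongest signal gets its name resolved first.
 */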
453 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
454 struct inquiry_entry *ie)
456 struct discovery_state *cache = &hdev->discovery;
457 struct list_head *pos = &cache->resolve;
458 struct inquiry_entry *p;
460 list_del(&ie->list);
462 list_for_each_entry(p, &cache->resolve, list) {
463 if (p->name_state != NAME_PENDING &&
464 abs(p->data.rssi) >= abs(ie->data.rssi))
465 break;
466 pos = &p->list;
469 list_add(&ie->list, pos);
472 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
473 bool name_known, bool *ssp)
475 struct discovery_state *cache = &hdev->discovery;
476 struct inquiry_entry *ie;
478 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
480 if (ssp)
481 *ssp = data->ssp_mode;
483 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
484 if (ie) {
485 if (ie->data.ssp_mode && ssp)
486 *ssp = true;
488 if (ie->name_state == NAME_NEEDED &&
489 data->rssi != ie->data.rssi) {
490 ie->data.rssi = data->rssi;
491 hci_inquiry_cache_update_resolve(hdev, ie);
494 goto update;
497 /* Entry not in the cache. Add new one. */
498 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
499 if (!ie)
500 return false;
502 list_add(&ie->all, &cache->all);
504 if (name_known) {
505 ie->name_state = NAME_KNOWN;
506 } else {
507 ie->name_state = NAME_NOT_KNOWN;
508 list_add(&ie->list, &cache->unknown);
511 update:
512 if (name_known && ie->name_state != NAME_KNOWN &&
513 ie->name_state != NAME_PENDING) {
514 ie->name_state = NAME_KNOWN;
515 list_del(&ie->list);
518 memcpy(&ie->data, data, sizeof(*data));
519 ie->timestamp = jiffies;
520 cache->timestamp = jiffies;
522 if (ie->name_state == NAME_NOT_KNOWN)
523 return false;
525 return true;
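/* Copy up to num entries from the inquiry cache into buf as consecutive
 * struct inquiry_info records; returns the number of entries copied.
 */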
528 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
530 struct discovery_state *cache = &hdev->discovery;
531 struct inquiry_info *info = (struct inquiry_info *) buf;
532 struct inquiry_entry *e;
533 int copied = 0;
535 list_for_each_entry(e, &cache->all, all) {
536 struct inquiry_data *data = &e->data;
538 if (copied >= num)
539 break;
541 bacpy(&info->bdaddr, &data->bdaddr);
542 info->pscan_rep_mode = data->pscan_rep_mode;
543 info->pscan_period_mode = data->pscan_period_mode;
544 info->pscan_mode = data->pscan_mode;
545 memcpy(info->dev_class, data->dev_class, 3);
546 info->clock_offset = data->clock_offset;
548 info++;
549 copied++;
552 BT_DBG("cache %p, copied %d", cache, copied);
553 return copied;
556 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
558 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
559 struct hci_cp_inquiry cp;
561 BT_DBG("%s", hdev->name);
563 if (test_bit(HCI_INQUIRY, &hdev->flags))
564 return;
566 /* Start Inquiry */
567 memcpy(&cp.lap, &ir->lap, 3);
568 cp.length = ir->length;
569 cp.num_rsp = ir->num_rsp;
570 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
573 int hci_inquiry(void __user *arg)
575 __u8 __user *ptr = arg;
576 struct hci_inquiry_req ir;
577 struct hci_dev *hdev;
578 int err = 0, do_inquiry = 0, max_rsp;
579 long timeo;
580 __u8 *buf;
582 if (copy_from_user(&ir, ptr, sizeof(ir)))
583 return -EFAULT;
585 hdev = hci_dev_get(ir.dev_id);
586 if (!hdev)
587 return -ENODEV;
589 hci_dev_lock(hdev);
590 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
591 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
592 inquiry_cache_flush(hdev);
593 do_inquiry = 1;
595 hci_dev_unlock(hdev);
597 timeo = ir.length * msecs_to_jiffies(2000);
599 if (do_inquiry) {
600 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
601 if (err < 0)
602 goto done;
605 /* For an unlimited number of responses we will use a buffer with
606 * 255 entries. */
608 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
610 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
611 * and then copy it to user space. */
613 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
614 if (!buf) {
615 err = -ENOMEM;
616 goto done;
619 hci_dev_lock(hdev);
620 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
621 hci_dev_unlock(hdev);
623 BT_DBG("num_rsp %d", ir.num_rsp);
625 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
626 ptr += sizeof(ir);
627 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
628 ir.num_rsp))
629 err = -EFAULT;
630 } else
631 err = -EFAULT;
633 kfree(buf);
635 done:
636 hci_dev_put(hdev);
637 return err;
640 /* ---- HCI ioctl helpers ---- */
642 int hci_dev_open(__u16 dev)
644 struct hci_dev *hdev;
645 int ret = 0;
647 hdev = hci_dev_get(dev);
648 if (!hdev)
649 return -ENODEV;
651 BT_DBG("%s %p", hdev->name, hdev);
653 hci_req_lock(hdev);
655 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
656 ret = -ENODEV;
657 goto done;
660 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
661 ret = -ERFKILL;
662 goto done;
665 if (test_bit(HCI_UP, &hdev->flags)) {
666 ret = -EALREADY;
667 goto done;
670 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
671 set_bit(HCI_RAW, &hdev->flags);
673 /* Treat all non-BR/EDR controllers as raw devices if
674 enable_hs is not set */
675 if (hdev->dev_type != HCI_BREDR && !enable_hs)
676 set_bit(HCI_RAW, &hdev->flags);
678 if (hdev->open(hdev)) {
679 ret = -EIO;
680 goto done;
683 if (!test_bit(HCI_RAW, &hdev->flags)) {
684 atomic_set(&hdev->cmd_cnt, 1);
685 set_bit(HCI_INIT, &hdev->flags);
686 hdev->init_last_cmd = 0;
688 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
690 if (lmp_host_le_capable(hdev))
691 ret = __hci_request(hdev, hci_le_init_req, 0,
692 HCI_INIT_TIMEOUT);
694 clear_bit(HCI_INIT, &hdev->flags);
697 if (!ret) {
698 hci_dev_hold(hdev);
699 set_bit(HCI_UP, &hdev->flags);
700 hci_notify(hdev, HCI_DEV_UP);
701 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
702 mgmt_valid_hdev(hdev)) {
703 hci_dev_lock(hdev);
704 mgmt_powered(hdev, 1);
705 hci_dev_unlock(hdev);
707 } else {
708 /* Init failed, cleanup */
709 flush_work(&hdev->tx_work);
710 flush_work(&hdev->cmd_work);
711 flush_work(&hdev->rx_work);
713 skb_queue_purge(&hdev->cmd_q);
714 skb_queue_purge(&hdev->rx_q);
716 if (hdev->flush)
717 hdev->flush(hdev);
719 if (hdev->sent_cmd) {
720 kfree_skb(hdev->sent_cmd);
721 hdev->sent_cmd = NULL;
724 hdev->close(hdev);
725 hdev->flags = 0;
728 done:
729 hci_req_unlock(hdev);
730 hci_dev_put(hdev);
731 return ret;
734 static int hci_dev_do_close(struct hci_dev *hdev)
736 BT_DBG("%s %p", hdev->name, hdev);
738 cancel_work_sync(&hdev->le_scan);
740 cancel_delayed_work(&hdev->power_off);
742 hci_req_cancel(hdev, ENODEV);
743 hci_req_lock(hdev);
745 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
746 del_timer_sync(&hdev->cmd_timer);
747 hci_req_unlock(hdev);
748 return 0;
751 /* Flush RX and TX works */
752 flush_work(&hdev->tx_work);
753 flush_work(&hdev->rx_work);
755 if (hdev->discov_timeout > 0) {
756 cancel_delayed_work(&hdev->discov_off);
757 hdev->discov_timeout = 0;
758 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
761 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
762 cancel_delayed_work(&hdev->service_cache);
764 cancel_delayed_work_sync(&hdev->le_scan_disable);
766 hci_dev_lock(hdev);
767 inquiry_cache_flush(hdev);
768 hci_conn_hash_flush(hdev);
769 hci_dev_unlock(hdev);
771 hci_notify(hdev, HCI_DEV_DOWN);
773 if (hdev->flush)
774 hdev->flush(hdev);
776 /* Reset device */
777 skb_queue_purge(&hdev->cmd_q);
778 atomic_set(&hdev->cmd_cnt, 1);
779 if (!test_bit(HCI_RAW, &hdev->flags) &&
780 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
781 set_bit(HCI_INIT, &hdev->flags);
782 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
783 clear_bit(HCI_INIT, &hdev->flags);
786 /* flush cmd work */
787 flush_work(&hdev->cmd_work);
789 /* Drop queues */
790 skb_queue_purge(&hdev->rx_q);
791 skb_queue_purge(&hdev->cmd_q);
792 skb_queue_purge(&hdev->raw_q);
794 /* Drop last sent command */
795 if (hdev->sent_cmd) {
796 del_timer_sync(&hdev->cmd_timer);
797 kfree_skb(hdev->sent_cmd);
798 hdev->sent_cmd = NULL;
801 /* After this point our queues are empty
802 * and no tasks are scheduled. */
803 hdev->close(hdev);
805 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
806 mgmt_valid_hdev(hdev)) {
807 hci_dev_lock(hdev);
808 mgmt_powered(hdev, 0);
809 hci_dev_unlock(hdev);
812 /* Clear flags */
813 hdev->flags = 0;
815 memset(hdev->eir, 0, sizeof(hdev->eir));
816 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
818 hci_req_unlock(hdev);
820 hci_dev_put(hdev);
821 return 0;
824 int hci_dev_close(__u16 dev)
826 struct hci_dev *hdev;
827 int err;
829 hdev = hci_dev_get(dev);
830 if (!hdev)
831 return -ENODEV;
833 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
834 cancel_delayed_work(&hdev->power_off);
836 err = hci_dev_do_close(hdev);
838 hci_dev_put(hdev);
839 return err;
842 int hci_dev_reset(__u16 dev)
844 struct hci_dev *hdev;
845 int ret = 0;
847 hdev = hci_dev_get(dev);
848 if (!hdev)
849 return -ENODEV;
851 hci_req_lock(hdev);
853 if (!test_bit(HCI_UP, &hdev->flags))
854 goto done;
856 /* Drop queues */
857 skb_queue_purge(&hdev->rx_q);
858 skb_queue_purge(&hdev->cmd_q);
860 hci_dev_lock(hdev);
861 inquiry_cache_flush(hdev);
862 hci_conn_hash_flush(hdev);
863 hci_dev_unlock(hdev);
865 if (hdev->flush)
866 hdev->flush(hdev);
868 atomic_set(&hdev->cmd_cnt, 1);
869 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
871 if (!test_bit(HCI_RAW, &hdev->flags))
872 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
874 done:
875 hci_req_unlock(hdev);
876 hci_dev_put(hdev);
877 return ret;
880 int hci_dev_reset_stat(__u16 dev)
882 struct hci_dev *hdev;
883 int ret = 0;
885 hdev = hci_dev_get(dev);
886 if (!hdev)
887 return -ENODEV;
889 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
891 hci_dev_put(hdev);
893 return ret;
896 int hci_dev_cmd(unsigned int cmd, void __user *arg)
898 struct hci_dev *hdev;
899 struct hci_dev_req dr;
900 int err = 0;
902 if (copy_from_user(&dr, arg, sizeof(dr)))
903 return -EFAULT;
905 hdev = hci_dev_get(dr.dev_id);
906 if (!hdev)
907 return -ENODEV;
909 switch (cmd) {
910 case HCISETAUTH:
911 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
912 HCI_INIT_TIMEOUT);
913 break;
915 case HCISETENCRYPT:
916 if (!lmp_encrypt_capable(hdev)) {
917 err = -EOPNOTSUPP;
918 break;
921 if (!test_bit(HCI_AUTH, &hdev->flags)) {
922 /* Auth must be enabled first */
923 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
924 HCI_INIT_TIMEOUT);
925 if (err)
926 break;
929 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
930 HCI_INIT_TIMEOUT);
931 break;
933 case HCISETSCAN:
934 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
935 HCI_INIT_TIMEOUT);
936 break;
938 case HCISETLINKPOL:
939 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
940 HCI_INIT_TIMEOUT);
941 break;
943 case HCISETLINKMODE:
944 hdev->link_mode = ((__u16) dr.dev_opt) &
945 (HCI_LM_MASTER | HCI_LM_ACCEPT);
946 break;
948 case HCISETPTYPE:
949 hdev->pkt_type = (__u16) dr.dev_opt;
950 break;
952 case HCISETACLMTU:
953 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
954 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
955 break;
957 case HCISETSCOMTU:
958 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
959 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
960 break;
962 default:
963 err = -EINVAL;
964 break;
967 hci_dev_put(hdev);
968 return err;
971 int hci_get_dev_list(void __user *arg)
973 struct hci_dev *hdev;
974 struct hci_dev_list_req *dl;
975 struct hci_dev_req *dr;
976 int n = 0, size, err;
977 __u16 dev_num;
979 if (get_user(dev_num, (__u16 __user *) arg))
980 return -EFAULT;
982 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
983 return -EINVAL;
985 size = sizeof(*dl) + dev_num * sizeof(*dr);
987 dl = kzalloc(size, GFP_KERNEL);
988 if (!dl)
989 return -ENOMEM;
991 dr = dl->dev_req;
993 read_lock(&hci_dev_list_lock);
994 list_for_each_entry(hdev, &hci_dev_list, list) {
995 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
996 cancel_delayed_work(&hdev->power_off);
998 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
999 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1001 (dr + n)->dev_id = hdev->id;
1002 (dr + n)->dev_opt = hdev->flags;
1004 if (++n >= dev_num)
1005 break;
1007 read_unlock(&hci_dev_list_lock);
1009 dl->dev_num = n;
1010 size = sizeof(*dl) + n * sizeof(*dr);
1012 err = copy_to_user(arg, dl, size);
1013 kfree(dl);
1015 return err ? -EFAULT : 0;
1018 int hci_get_dev_info(void __user *arg)
1020 struct hci_dev *hdev;
1021 struct hci_dev_info di;
1022 int err = 0;
1024 if (copy_from_user(&di, arg, sizeof(di)))
1025 return -EFAULT;
1027 hdev = hci_dev_get(di.dev_id);
1028 if (!hdev)
1029 return -ENODEV;
1031 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1032 cancel_delayed_work_sync(&hdev->power_off);
1034 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1035 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1037 strcpy(di.name, hdev->name);
1038 di.bdaddr = hdev->bdaddr;
1039 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1040 di.flags = hdev->flags;
1041 di.pkt_type = hdev->pkt_type;
1042 di.acl_mtu = hdev->acl_mtu;
1043 di.acl_pkts = hdev->acl_pkts;
1044 di.sco_mtu = hdev->sco_mtu;
1045 di.sco_pkts = hdev->sco_pkts;
1046 di.link_policy = hdev->link_policy;
1047 di.link_mode = hdev->link_mode;
1049 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1050 memcpy(&di.features, &hdev->features, sizeof(di.features));
1052 if (copy_to_user(arg, &di, sizeof(di)))
1053 err = -EFAULT;
1055 hci_dev_put(hdev);
1057 return err;
1060 /* ---- Interface to HCI drivers ---- */
1062 static int hci_rfkill_set_block(void *data, bool blocked)
1064 struct hci_dev *hdev = data;
1066 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1068 if (!blocked)
1069 return 0;
1071 hci_dev_do_close(hdev);
1073 return 0;
1076 static const struct rfkill_ops hci_rfkill_ops = {
1077 .set_block = hci_rfkill_set_block,
1080 static void hci_power_on(struct work_struct *work)
1082 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1084 BT_DBG("%s", hdev->name);
1086 if (hci_dev_open(hdev->id) < 0)
1087 return;
1089 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1090 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
1092 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1093 mgmt_index_added(hdev);
1096 static void hci_power_off(struct work_struct *work)
1098 struct hci_dev *hdev = container_of(work, struct hci_dev,
1099 power_off.work);
1101 BT_DBG("%s", hdev->name);
1103 hci_dev_do_close(hdev);
1106 static void hci_discov_off(struct work_struct *work)
1108 struct hci_dev *hdev;
1109 u8 scan = SCAN_PAGE;
1111 hdev = container_of(work, struct hci_dev, discov_off.work);
1113 BT_DBG("%s", hdev->name);
1115 hci_dev_lock(hdev);
1117 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1119 hdev->discov_timeout = 0;
1121 hci_dev_unlock(hdev);
1124 int hci_uuids_clear(struct hci_dev *hdev)
1126 struct list_head *p, *n;
1128 list_for_each_safe(p, n, &hdev->uuids) {
1129 struct bt_uuid *uuid;
1131 uuid = list_entry(p, struct bt_uuid, list);
1133 list_del(p);
1134 kfree(uuid);
1137 return 0;
1140 int hci_link_keys_clear(struct hci_dev *hdev)
1142 struct list_head *p, *n;
1144 list_for_each_safe(p, n, &hdev->link_keys) {
1145 struct link_key *key;
1147 key = list_entry(p, struct link_key, list);
1149 list_del(p);
1150 kfree(key);
1153 return 0;
1156 int hci_smp_ltks_clear(struct hci_dev *hdev)
1158 struct smp_ltk *k, *tmp;
1160 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1161 list_del(&k->list);
1162 kfree(k);
1165 return 0;
1168 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1170 struct link_key *k;
1172 list_for_each_entry(k, &hdev->link_keys, list)
1173 if (bacmp(bdaddr, &k->bdaddr) == 0)
1174 return k;
1176 return NULL;
1179 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1180 u8 key_type, u8 old_key_type)
1182 /* Legacy key */
1183 if (key_type < 0x03)
1184 return true;
1186 /* Debug keys are insecure so don't store them persistently */
1187 if (key_type == HCI_LK_DEBUG_COMBINATION)
1188 return false;
1190 /* Changed combination key and there's no previous one */
1191 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1192 return false;
1194 /* Security mode 3 case */
1195 if (!conn)
1196 return true;
1198 /* Neither local nor remote side had no-bonding as requirement */
1199 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1200 return true;
1202 /* Local side had dedicated bonding as requirement */
1203 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1204 return true;
1206 /* Remote side had dedicated bonding as requirement */
1207 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1208 return true;
1210 /* If none of the above criteria match, then don't store the key
1211 * persistently */
1212 return false;
1215 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1217 struct smp_ltk *k;
1219 list_for_each_entry(k, &hdev->long_term_keys, list) {
1220 if (k->ediv != ediv ||
1221 memcmp(rand, k->rand, sizeof(k->rand)))
1222 continue;
1224 return k;
1227 return NULL;
1230 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1231 u8 addr_type)
1233 struct smp_ltk *k;
1235 list_for_each_entry(k, &hdev->long_term_keys, list)
1236 if (addr_type == k->bdaddr_type &&
1237 bacmp(bdaddr, &k->bdaddr) == 0)
1238 return k;
1240 return NULL;
1243 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1244 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1246 struct link_key *key, *old_key;
1247 u8 old_key_type;
1248 bool persistent;
1250 old_key = hci_find_link_key(hdev, bdaddr);
1251 if (old_key) {
1252 old_key_type = old_key->type;
1253 key = old_key;
1254 } else {
1255 old_key_type = conn ? conn->key_type : 0xff;
1256 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1257 if (!key)
1258 return -ENOMEM;
1259 list_add(&key->list, &hdev->link_keys);
1262 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1264 /* Some buggy controller combinations generate a changed
1265 * combination key for legacy pairing even when there's no
1266 * previous key */
1267 if (type == HCI_LK_CHANGED_COMBINATION &&
1268 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1269 type = HCI_LK_COMBINATION;
1270 if (conn)
1271 conn->key_type = type;
1274 bacpy(&key->bdaddr, bdaddr);
1275 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1276 key->pin_len = pin_len;
1278 if (type == HCI_LK_CHANGED_COMBINATION)
1279 key->type = old_key_type;
1280 else
1281 key->type = type;
1283 if (!new_key)
1284 return 0;
1286 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1288 mgmt_new_link_key(hdev, key, persistent);
1290 if (conn)
1291 conn->flush_key = !persistent;
1293 return 0;
1296 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1297 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1298 ediv, u8 rand[8])
1300 struct smp_ltk *key, *old_key;
1302 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1303 return 0;
1305 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1306 if (old_key)
1307 key = old_key;
1308 else {
1309 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1310 if (!key)
1311 return -ENOMEM;
1312 list_add(&key->list, &hdev->long_term_keys);
1315 bacpy(&key->bdaddr, bdaddr);
1316 key->bdaddr_type = addr_type;
1317 memcpy(key->val, tk, sizeof(key->val));
1318 key->authenticated = authenticated;
1319 key->ediv = ediv;
1320 key->enc_size = enc_size;
1321 key->type = type;
1322 memcpy(key->rand, rand, sizeof(key->rand));
1324 if (!new_key)
1325 return 0;
1327 if (type & HCI_SMP_LTK)
1328 mgmt_new_ltk(hdev, key, 1);
1330 return 0;
1333 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1335 struct link_key *key;
1337 key = hci_find_link_key(hdev, bdaddr);
1338 if (!key)
1339 return -ENOENT;
1341 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1343 list_del(&key->list);
1344 kfree(key);
1346 return 0;
1349 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1351 struct smp_ltk *k, *tmp;
1353 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1354 if (bacmp(bdaddr, &k->bdaddr))
1355 continue;
1357 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1359 list_del(&k->list);
1360 kfree(k);
1363 return 0;
1366 /* HCI command timer function */
1367 static void hci_cmd_timeout(unsigned long arg)
1369 struct hci_dev *hdev = (void *) arg;
1371 if (hdev->sent_cmd) {
1372 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1373 u16 opcode = __le16_to_cpu(sent->opcode);
1375 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1376 } else {
1377 BT_ERR("%s command tx timeout", hdev->name);
1380 atomic_set(&hdev->cmd_cnt, 1);
1381 queue_work(hdev->workqueue, &hdev->cmd_work);
1384 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1385 bdaddr_t *bdaddr)
1387 struct oob_data *data;
1389 list_for_each_entry(data, &hdev->remote_oob_data, list)
1390 if (bacmp(bdaddr, &data->bdaddr) == 0)
1391 return data;
1393 return NULL;
1396 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1398 struct oob_data *data;
1400 data = hci_find_remote_oob_data(hdev, bdaddr);
1401 if (!data)
1402 return -ENOENT;
1404 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1406 list_del(&data->list);
1407 kfree(data);
1409 return 0;
1412 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1414 struct oob_data *data, *n;
1416 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1417 list_del(&data->list);
1418 kfree(data);
1421 return 0;
1424 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1425 u8 *randomizer)
1427 struct oob_data *data;
1429 data = hci_find_remote_oob_data(hdev, bdaddr);
1431 if (!data) {
1432 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1433 if (!data)
1434 return -ENOMEM;
1436 bacpy(&data->bdaddr, bdaddr);
1437 list_add(&data->list, &hdev->remote_oob_data);
1440 memcpy(data->hash, hash, sizeof(data->hash));
1441 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1443 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1445 return 0;
1448 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1450 struct bdaddr_list *b;
1452 list_for_each_entry(b, &hdev->blacklist, list)
1453 if (bacmp(bdaddr, &b->bdaddr) == 0)
1454 return b;
1456 return NULL;
1459 int hci_blacklist_clear(struct hci_dev *hdev)
1461 struct list_head *p, *n;
1463 list_for_each_safe(p, n, &hdev->blacklist) {
1464 struct bdaddr_list *b;
1466 b = list_entry(p, struct bdaddr_list, list);
1468 list_del(p);
1469 kfree(b);
1472 return 0;
1475 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1477 struct bdaddr_list *entry;
1479 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1480 return -EBADF;
1482 if (hci_blacklist_lookup(hdev, bdaddr))
1483 return -EEXIST;
1485 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1486 if (!entry)
1487 return -ENOMEM;
1489 bacpy(&entry->bdaddr, bdaddr);
1491 list_add(&entry->list, &hdev->blacklist);
1493 return mgmt_device_blocked(hdev, bdaddr, type);
1496 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1498 struct bdaddr_list *entry;
1500 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1501 return hci_blacklist_clear(hdev);
1503 entry = hci_blacklist_lookup(hdev, bdaddr);
1504 if (!entry)
1505 return -ENOENT;
1507 list_del(&entry->list);
1508 kfree(entry);
1510 return mgmt_device_unblocked(hdev, bdaddr, type);
1513 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1515 struct le_scan_params *param = (struct le_scan_params *) opt;
1516 struct hci_cp_le_set_scan_param cp;
1518 memset(&cp, 0, sizeof(cp));
1519 cp.type = param->type;
1520 cp.interval = cpu_to_le16(param->interval);
1521 cp.window = cpu_to_le16(param->window);
1523 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1526 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1528 struct hci_cp_le_set_scan_enable cp;
1530 memset(&cp, 0, sizeof(cp));
1531 cp.enable = 1;
1532 cp.filter_dup = 1;
1534 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1537 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1538 u16 window, int timeout)
1540 long timeo = msecs_to_jiffies(3000);
1541 struct le_scan_params param;
1542 int err;
1544 BT_DBG("%s", hdev->name);
1546 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1547 return -EINPROGRESS;
1549 param.type = type;
1550 param.interval = interval;
1551 param.window = window;
1553 hci_req_lock(hdev);
1555 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1556 timeo);
1557 if (!err)
1558 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1560 hci_req_unlock(hdev);
1562 if (err < 0)
1563 return err;
1565 schedule_delayed_work(&hdev->le_scan_disable,
1566 msecs_to_jiffies(timeout));
1568 return 0;
1571 int hci_cancel_le_scan(struct hci_dev *hdev)
1573 BT_DBG("%s", hdev->name);
1575 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1576 return -EALREADY;
1578 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1579 struct hci_cp_le_set_scan_enable cp;
1581 /* Send HCI command to disable LE Scan */
1582 memset(&cp, 0, sizeof(cp));
1583 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1586 return 0;
1589 static void le_scan_disable_work(struct work_struct *work)
1591 struct hci_dev *hdev = container_of(work, struct hci_dev,
1592 le_scan_disable.work);
1593 struct hci_cp_le_set_scan_enable cp;
1595 BT_DBG("%s", hdev->name);
1597 memset(&cp, 0, sizeof(cp));
1599 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1602 static void le_scan_work(struct work_struct *work)
1604 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1605 struct le_scan_params *param = &hdev->le_scan_params;
1607 BT_DBG("%s", hdev->name);
1609 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1610 param->timeout);
1613 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1614 int timeout)
1616 struct le_scan_params *param = &hdev->le_scan_params;
1618 BT_DBG("%s", hdev->name);
1620 if (work_busy(&hdev->le_scan))
1621 return -EINPROGRESS;
1623 param->type = type;
1624 param->interval = interval;
1625 param->window = window;
1626 param->timeout = timeout;
1628 queue_work(system_long_wq, &hdev->le_scan);
1630 return 0;
1633 /* Alloc HCI device */
1634 struct hci_dev *hci_alloc_dev(void)
1636 struct hci_dev *hdev;
1638 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1639 if (!hdev)
1640 return NULL;
1642 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1643 hdev->esco_type = (ESCO_HV1);
1644 hdev->link_mode = (HCI_LM_ACCEPT);
1645 hdev->io_capability = 0x03; /* No Input No Output */
1647 hdev->sniff_max_interval = 800;
1648 hdev->sniff_min_interval = 80;
1650 mutex_init(&hdev->lock);
1651 mutex_init(&hdev->req_lock);
1653 INIT_LIST_HEAD(&hdev->mgmt_pending);
1654 INIT_LIST_HEAD(&hdev->blacklist);
1655 INIT_LIST_HEAD(&hdev->uuids);
1656 INIT_LIST_HEAD(&hdev->link_keys);
1657 INIT_LIST_HEAD(&hdev->long_term_keys);
1658 INIT_LIST_HEAD(&hdev->remote_oob_data);
1659 INIT_LIST_HEAD(&hdev->conn_hash.list);
1661 INIT_WORK(&hdev->rx_work, hci_rx_work);
1662 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1663 INIT_WORK(&hdev->tx_work, hci_tx_work);
1664 INIT_WORK(&hdev->power_on, hci_power_on);
1665 INIT_WORK(&hdev->le_scan, le_scan_work);
1667 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1668 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1669 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1671 skb_queue_head_init(&hdev->driver_init);
1672 skb_queue_head_init(&hdev->rx_q);
1673 skb_queue_head_init(&hdev->cmd_q);
1674 skb_queue_head_init(&hdev->raw_q);
1676 init_waitqueue_head(&hdev->req_wait_q);
1678 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1680 hci_init_sysfs(hdev);
1681 discovery_init(hdev);
1683 return hdev;
1685 EXPORT_SYMBOL(hci_alloc_dev);
1687 /* Free HCI device */
1688 void hci_free_dev(struct hci_dev *hdev)
1690 skb_queue_purge(&hdev->driver_init);
1692 /* will free via device release */
1693 put_device(&hdev->dev);
1695 EXPORT_SYMBOL(hci_free_dev);
1697 /* Register HCI device */
1698 int hci_register_dev(struct hci_dev *hdev)
1700 int id, error;
1702 if (!hdev->open || !hdev->close)
1703 return -EINVAL;
1705 /* Do not allow HCI_AMP devices to register at index 0,
1706 * so the index can be used as the AMP controller ID. */
1708 switch (hdev->dev_type) {
1709 case HCI_BREDR:
1710 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1711 break;
1712 case HCI_AMP:
1713 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1714 break;
1715 default:
1716 return -EINVAL;
1719 if (id < 0)
1720 return id;
1722 sprintf(hdev->name, "hci%d", id);
1723 hdev->id = id;
1725 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1727 write_lock(&hci_dev_list_lock);
1728 list_add(&hdev->list, &hci_dev_list);
1729 write_unlock(&hci_dev_list_lock);
1731 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1732 WQ_MEM_RECLAIM, 1);
1733 if (!hdev->workqueue) {
1734 error = -ENOMEM;
1735 goto err;
1738 error = hci_add_sysfs(hdev);
1739 if (error < 0)
1740 goto err_wqueue;
1742 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1743 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1744 hdev);
1745 if (hdev->rfkill) {
1746 if (rfkill_register(hdev->rfkill) < 0) {
1747 rfkill_destroy(hdev->rfkill);
1748 hdev->rfkill = NULL;
1752 set_bit(HCI_SETUP, &hdev->dev_flags);
1754 if (hdev->dev_type != HCI_AMP)
1755 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1757 hci_notify(hdev, HCI_DEV_REG);
1758 hci_dev_hold(hdev);
1760 schedule_work(&hdev->power_on);
1762 return id;
1764 err_wqueue:
1765 destroy_workqueue(hdev->workqueue);
1766 err:
1767 ida_simple_remove(&hci_index_ida, hdev->id);
1768 write_lock(&hci_dev_list_lock);
1769 list_del(&hdev->list);
1770 write_unlock(&hci_dev_list_lock);
1772 return error;
1774 EXPORT_SYMBOL(hci_register_dev);
1776 /* Unregister HCI device */
1777 void hci_unregister_dev(struct hci_dev *hdev)
1779 int i, id;
1781 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1783 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1785 id = hdev->id;
1787 write_lock(&hci_dev_list_lock);
1788 list_del(&hdev->list);
1789 write_unlock(&hci_dev_list_lock);
1791 hci_dev_do_close(hdev);
1793 for (i = 0; i < NUM_REASSEMBLY; i++)
1794 kfree_skb(hdev->reassembly[i]);
1796 if (!test_bit(HCI_INIT, &hdev->flags) &&
1797 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1798 hci_dev_lock(hdev);
1799 mgmt_index_removed(hdev);
1800 hci_dev_unlock(hdev);
1803 /* mgmt_index_removed should take care of emptying the
1804 * pending list */
1805 BUG_ON(!list_empty(&hdev->mgmt_pending));
1807 hci_notify(hdev, HCI_DEV_UNREG);
1809 if (hdev->rfkill) {
1810 rfkill_unregister(hdev->rfkill);
1811 rfkill_destroy(hdev->rfkill);
1814 hci_del_sysfs(hdev);
1816 destroy_workqueue(hdev->workqueue);
1818 hci_dev_lock(hdev);
1819 hci_blacklist_clear(hdev);
1820 hci_uuids_clear(hdev);
1821 hci_link_keys_clear(hdev);
1822 hci_smp_ltks_clear(hdev);
1823 hci_remote_oob_data_clear(hdev);
1824 hci_dev_unlock(hdev);
1826 hci_dev_put(hdev);
1828 ida_simple_remove(&hci_index_ida, id);
1830 EXPORT_SYMBOL(hci_unregister_dev);
1832 /* Suspend HCI device */
1833 int hci_suspend_dev(struct hci_dev *hdev)
1835 hci_notify(hdev, HCI_DEV_SUSPEND);
1836 return 0;
1838 EXPORT_SYMBOL(hci_suspend_dev);
1840 /* Resume HCI device */
1841 int hci_resume_dev(struct hci_dev *hdev)
1843 hci_notify(hdev, HCI_DEV_RESUME);
1844 return 0;
1846 EXPORT_SYMBOL(hci_resume_dev);
1848 /* Receive frame from HCI drivers */
1849 int hci_recv_frame(struct sk_buff *skb)
1851 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1852 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1853 && !test_bit(HCI_INIT, &hdev->flags))) {
1854 kfree_skb(skb);
1855 return -ENXIO;
1858 /* Incoming skb */
1859 bt_cb(skb)->incoming = 1;
1861 /* Time stamp */
1862 __net_timestamp(skb);
1864 skb_queue_tail(&hdev->rx_q, skb);
1865 queue_work(hdev->workqueue, &hdev->rx_work);
1867 return 0;
1869 EXPORT_SYMBOL(hci_recv_frame);
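/* Collect bytes for one HCI packet in hdev->reassembly[index]: allocate an
 * skb sized for the packet type, copy data until the header and then the
 * full payload have arrived, and pass the completed frame to
 * hci_recv_frame(). Returns the number of input bytes left over, or a
 * negative error.
 */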
1871 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1872 int count, __u8 index)
1874 int len = 0;
1875 int hlen = 0;
1876 int remain = count;
1877 struct sk_buff *skb;
1878 struct bt_skb_cb *scb;
1880 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1881 index >= NUM_REASSEMBLY)
1882 return -EILSEQ;
1884 skb = hdev->reassembly[index];
1886 if (!skb) {
1887 switch (type) {
1888 case HCI_ACLDATA_PKT:
1889 len = HCI_MAX_FRAME_SIZE;
1890 hlen = HCI_ACL_HDR_SIZE;
1891 break;
1892 case HCI_EVENT_PKT:
1893 len = HCI_MAX_EVENT_SIZE;
1894 hlen = HCI_EVENT_HDR_SIZE;
1895 break;
1896 case HCI_SCODATA_PKT:
1897 len = HCI_MAX_SCO_SIZE;
1898 hlen = HCI_SCO_HDR_SIZE;
1899 break;
1902 skb = bt_skb_alloc(len, GFP_ATOMIC);
1903 if (!skb)
1904 return -ENOMEM;
1906 scb = (void *) skb->cb;
1907 scb->expect = hlen;
1908 scb->pkt_type = type;
1910 skb->dev = (void *) hdev;
1911 hdev->reassembly[index] = skb;
1914 while (count) {
1915 scb = (void *) skb->cb;
1916 len = min_t(uint, scb->expect, count);
1918 memcpy(skb_put(skb, len), data, len);
1920 count -= len;
1921 data += len;
1922 scb->expect -= len;
1923 remain = count;
1925 switch (type) {
1926 case HCI_EVENT_PKT:
1927 if (skb->len == HCI_EVENT_HDR_SIZE) {
1928 struct hci_event_hdr *h = hci_event_hdr(skb);
1929 scb->expect = h->plen;
1931 if (skb_tailroom(skb) < scb->expect) {
1932 kfree_skb(skb);
1933 hdev->reassembly[index] = NULL;
1934 return -ENOMEM;
1937 break;
1939 case HCI_ACLDATA_PKT:
1940 if (skb->len == HCI_ACL_HDR_SIZE) {
1941 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1942 scb->expect = __le16_to_cpu(h->dlen);
1944 if (skb_tailroom(skb) < scb->expect) {
1945 kfree_skb(skb);
1946 hdev->reassembly[index] = NULL;
1947 return -ENOMEM;
1950 break;
1952 case HCI_SCODATA_PKT:
1953 if (skb->len == HCI_SCO_HDR_SIZE) {
1954 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1955 scb->expect = h->dlen;
1957 if (skb_tailroom(skb) < scb->expect) {
1958 kfree_skb(skb);
1959 hdev->reassembly[index] = NULL;
1960 return -ENOMEM;
1963 break;
1966 if (scb->expect == 0) {
1967 /* Complete frame */
1969 bt_cb(skb)->pkt_type = type;
1970 hci_recv_frame(skb);
1972 hdev->reassembly[index] = NULL;
1973 return remain;
1977 return remain;
1980 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1982 int rem = 0;
1984 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1985 return -EILSEQ;
1987 while (count) {
1988 rem = hci_reassembly(hdev, type, data, count, type - 1);
1989 if (rem < 0)
1990 return rem;
1992 data += (count - rem);
1993 count = rem;
1996 return rem;
1998 EXPORT_SYMBOL(hci_recv_fragment);
2000 #define STREAM_REASSEMBLY 0
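/* For drivers that deliver a raw byte stream: the first byte of every frame
 * is the packet type indicator, and the remainder is reassembled through the
 * single STREAM_REASSEMBLY slot.
 */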
2002 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2004 int type;
2005 int rem = 0;
2007 while (count) {
2008 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2010 if (!skb) {
2011 struct { char type; } *pkt;
2013 /* Start of the frame */
2014 pkt = data;
2015 type = pkt->type;
2017 data++;
2018 count--;
2019 } else
2020 type = bt_cb(skb)->pkt_type;
2022 rem = hci_reassembly(hdev, type, data, count,
2023 STREAM_REASSEMBLY);
2024 if (rem < 0)
2025 return rem;
2027 data += (count - rem);
2028 count = rem;
2031 return rem;
2033 EXPORT_SYMBOL(hci_recv_stream_fragment);
2035 /* ---- Interface to upper protocols ---- */
2037 int hci_register_cb(struct hci_cb *cb)
2039 BT_DBG("%p name %s", cb, cb->name);
2041 write_lock(&hci_cb_list_lock);
2042 list_add(&cb->list, &hci_cb_list);
2043 write_unlock(&hci_cb_list_lock);
2045 return 0;
2047 EXPORT_SYMBOL(hci_register_cb);
2049 int hci_unregister_cb(struct hci_cb *cb)
2051 BT_DBG("%p name %s", cb, cb->name);
2053 write_lock(&hci_cb_list_lock);
2054 list_del(&cb->list);
2055 write_unlock(&hci_cb_list_lock);
2057 return 0;
2059 EXPORT_SYMBOL(hci_unregister_cb);
2061 static int hci_send_frame(struct sk_buff *skb)
2063 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2065 if (!hdev) {
2066 kfree_skb(skb);
2067 return -ENODEV;
2070 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2072 /* Time stamp */
2073 __net_timestamp(skb);
2075 /* Send copy to monitor */
2076 hci_send_to_monitor(hdev, skb);
2078 if (atomic_read(&hdev->promisc)) {
2079 /* Send copy to the sockets */
2080 hci_send_to_sock(hdev, skb);
2083 /* Get rid of skb owner, prior to sending to the driver. */
2084 skb_orphan(skb);
2086 return hdev->send(skb);
2089 /* Send HCI command */
2090 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2092 int len = HCI_COMMAND_HDR_SIZE + plen;
2093 struct hci_command_hdr *hdr;
2094 struct sk_buff *skb;
2096 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2098 skb = bt_skb_alloc(len, GFP_ATOMIC);
2099 if (!skb) {
2100 BT_ERR("%s no memory for command", hdev->name);
2101 return -ENOMEM;
2104 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2105 hdr->opcode = cpu_to_le16(opcode);
2106 hdr->plen = plen;
2108 if (plen)
2109 memcpy(skb_put(skb, plen), param, plen);
2111 BT_DBG("skb len %d", skb->len);
2113 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2114 skb->dev = (void *) hdev;
2116 if (test_bit(HCI_INIT, &hdev->flags))
2117 hdev->init_last_cmd = opcode;
2119 skb_queue_tail(&hdev->cmd_q, skb);
2120 queue_work(hdev->workqueue, &hdev->cmd_work);
2122 return 0;
2125 /* Get data from the previously sent command */
2126 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2128 struct hci_command_hdr *hdr;
2130 if (!hdev->sent_cmd)
2131 return NULL;
2133 hdr = (void *) hdev->sent_cmd->data;
2135 if (hdr->opcode != cpu_to_le16(opcode))
2136 return NULL;
2138 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2140 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2143 /* Send ACL data */
2144 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2146 struct hci_acl_hdr *hdr;
2147 int len = skb->len;
2149 skb_push(skb, HCI_ACL_HDR_SIZE);
2150 skb_reset_transport_header(skb);
2151 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2152 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2153 hdr->dlen = cpu_to_le16(len);
2156 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2157 struct sk_buff *skb, __u16 flags)
2159 struct hci_conn *conn = chan->conn;
2160 struct hci_dev *hdev = conn->hdev;
2161 struct sk_buff *list;
2163 skb->len = skb_headlen(skb);
2164 skb->data_len = 0;
2166 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2168 switch (hdev->dev_type) {
2169 case HCI_BREDR:
2170 hci_add_acl_hdr(skb, conn->handle, flags);
2171 break;
2172 case HCI_AMP:
2173 hci_add_acl_hdr(skb, chan->handle, flags);
2174 break;
2175 default:
2176 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2177 return;
2180 list = skb_shinfo(skb)->frag_list;
2181 if (!list) {
2182 /* Non fragmented */
2183 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2185 skb_queue_tail(queue, skb);
2186 } else {
2187 /* Fragmented */
2188 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2190 skb_shinfo(skb)->frag_list = NULL;
2192 /* Queue all fragments atomically */
2193 spin_lock(&queue->lock);
2195 __skb_queue_tail(queue, skb);
2197 flags &= ~ACL_START;
2198 flags |= ACL_CONT;
2199 do {
2200 skb = list; list = list->next;
2202 skb->dev = (void *) hdev;
2203 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2204 hci_add_acl_hdr(skb, conn->handle, flags);
2206 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2208 __skb_queue_tail(queue, skb);
2209 } while (list);
2211 spin_unlock(&queue->lock);
2215 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2217 struct hci_dev *hdev = chan->conn->hdev;
2219 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2221 skb->dev = (void *) hdev;
2223 hci_queue_acl(chan, &chan->data_q, skb, flags);
2225 queue_work(hdev->workqueue, &hdev->tx_work);
2228 /* Send SCO data */
2229 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2231 struct hci_dev *hdev = conn->hdev;
2232 struct hci_sco_hdr hdr;
2234 BT_DBG("%s len %d", hdev->name, skb->len);
2236 hdr.handle = cpu_to_le16(conn->handle);
2237 hdr.dlen = skb->len;
2239 skb_push(skb, HCI_SCO_HDR_SIZE);
2240 skb_reset_transport_header(skb);
2241 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2243 skb->dev = (void *) hdev;
2244 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2246 skb_queue_tail(&conn->data_q, skb);
2247 queue_work(hdev->workqueue, &hdev->tx_work);
2250 /* ---- HCI TX task (outgoing data) ---- */
2252 /* HCI Connection scheduler */
2253 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2254 int *quote)
2256 struct hci_conn_hash *h = &hdev->conn_hash;
2257 struct hci_conn *conn = NULL, *c;
2258 unsigned int num = 0, min = ~0;
2260 /* We don't have to lock device here. Connections are always
2261 * added and removed with TX task disabled. */
2263 rcu_read_lock();
2265 list_for_each_entry_rcu(c, &h->list, list) {
2266 if (c->type != type || skb_queue_empty(&c->data_q))
2267 continue;
2269 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2270 continue;
2272 num++;
2274 if (c->sent < min) {
2275 min = c->sent;
2276 conn = c;
2279 if (hci_conn_num(hdev, type) == num)
2280 break;
2283 rcu_read_unlock();
2285 if (conn) {
2286 int cnt, q;
2288 switch (conn->type) {
2289 case ACL_LINK:
2290 cnt = hdev->acl_cnt;
2291 break;
2292 case SCO_LINK:
2293 case ESCO_LINK:
2294 cnt = hdev->sco_cnt;
2295 break;
2296 case LE_LINK:
2297 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2298 break;
2299 default:
2300 cnt = 0;
2301 BT_ERR("Unknown link type");
2304 q = cnt / num;
2305 *quote = q ? q : 1;
2306 } else
2307 *quote = 0;
2309 BT_DBG("conn %p quote %d", conn, *quote);
2310 return conn;
2313 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2315 struct hci_conn_hash *h = &hdev->conn_hash;
2316 struct hci_conn *c;
2318 BT_ERR("%s link tx timeout", hdev->name);
2320 rcu_read_lock();
2322 /* Kill stalled connections */
2323 list_for_each_entry_rcu(c, &h->list, list) {
2324 if (c->type == type && c->sent) {
2325 BT_ERR("%s killing stalled connection %pMR",
2326 hdev->name, &c->dst);
2327 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2331 rcu_read_unlock();
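/* Pick the channel whose queued data has the highest priority; among those,
 * prefer the connection with the fewest packets in flight. *quote is set to
 * a fair share of the controller's free buffer slots for that link type.
 */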
2334 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2335 int *quote)
2337 struct hci_conn_hash *h = &hdev->conn_hash;
2338 struct hci_chan *chan = NULL;
2339 unsigned int num = 0, min = ~0, cur_prio = 0;
2340 struct hci_conn *conn;
2341 int cnt, q, conn_num = 0;
2343 BT_DBG("%s", hdev->name);
2345 rcu_read_lock();
2347 list_for_each_entry_rcu(conn, &h->list, list) {
2348 struct hci_chan *tmp;
2350 if (conn->type != type)
2351 continue;
2353 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2354 continue;
2356 conn_num++;
2358 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2359 struct sk_buff *skb;
2361 if (skb_queue_empty(&tmp->data_q))
2362 continue;
2364 skb = skb_peek(&tmp->data_q);
2365 if (skb->priority < cur_prio)
2366 continue;
2368 if (skb->priority > cur_prio) {
2369 num = 0;
2370 min = ~0;
2371 cur_prio = skb->priority;
2374 num++;
2376 if (conn->sent < min) {
2377 min = conn->sent;
2378 chan = tmp;
2382 if (hci_conn_num(hdev, type) == conn_num)
2383 break;
2386 rcu_read_unlock();
2388 if (!chan)
2389 return NULL;
2391 switch (chan->conn->type) {
2392 case ACL_LINK:
2393 cnt = hdev->acl_cnt;
2394 break;
2395 case AMP_LINK:
2396 cnt = hdev->block_cnt;
2397 break;
2398 case SCO_LINK:
2399 case ESCO_LINK:
2400 cnt = hdev->sco_cnt;
2401 break;
2402 case LE_LINK:
2403 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2404 break;
2405 default:
2406 cnt = 0;
2407 BT_ERR("Unknown link type");
2410 q = cnt / num;
2411 *quote = q ? q : 1;
2412 BT_DBG("chan %p quote %d", chan, *quote);
2413 return chan;
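/* After a scheduling round, reset the per-channel sent counters and promote
 * the head skb of channels that received no service to HCI_PRIO_MAX - 1, so
 * low-priority traffic is not starved.
 */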
2416 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2418 struct hci_conn_hash *h = &hdev->conn_hash;
2419 struct hci_conn *conn;
2420 int num = 0;
2422 BT_DBG("%s", hdev->name);
2424 rcu_read_lock();
2426 list_for_each_entry_rcu(conn, &h->list, list) {
2427 struct hci_chan *chan;
2429 if (conn->type != type)
2430 continue;
2432 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2433 continue;
2435 num++;
2437 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2438 struct sk_buff *skb;
2440 if (chan->sent) {
2441 chan->sent = 0;
2442 continue;
2445 if (skb_queue_empty(&chan->data_q))
2446 continue;
2448 skb = skb_peek(&chan->data_q);
2449 if (skb->priority >= HCI_PRIO_MAX - 1)
2450 continue;
2452 skb->priority = HCI_PRIO_MAX - 1;
2454 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2455 skb->priority);
2458 if (hci_conn_num(hdev, type) == num)
2459 break;
2462 rcu_read_unlock();
2466 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2468 /* Calculate count of blocks used by this packet */
2469 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2472 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2474 if (!test_bit(HCI_RAW, &hdev->flags)) {
2475 /* ACL tx timeout must be longer than maximum
2476 * link supervision timeout (40.9 seconds) */
2477 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2478 HCI_ACL_TX_TIMEOUT))
2479 hci_link_tx_to(hdev, ACL_LINK);
2483 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2485 unsigned int cnt = hdev->acl_cnt;
2486 struct hci_chan *chan;
2487 struct sk_buff *skb;
2488 int quote;
2490 __check_timeout(hdev, cnt);
2492 while (hdev->acl_cnt &&
2493 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2494 u32 priority = (skb_peek(&chan->data_q))->priority;
2495 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2496 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2497 skb->len, skb->priority);
2499 /* Stop if priority has changed */
2500 if (skb->priority < priority)
2501 break;
2503 skb = skb_dequeue(&chan->data_q);
2505 hci_conn_enter_active_mode(chan->conn,
2506 bt_cb(skb)->force_active);
2508 hci_send_frame(skb);
2509 hdev->acl_last_tx = jiffies;
2511 hdev->acl_cnt--;
2512 chan->sent++;
2513 chan->conn->sent++;
2514 }
2515 }
2517 if (cnt != hdev->acl_cnt)
2518 hci_prio_recalculate(hdev, ACL_LINK);
2519 }
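2520 /* Block-based ACL scheduling (data block flow control) */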
2521 static void hci_sched_acl_blk(struct hci_dev *hdev)
2522 {
2523 unsigned int cnt = hdev->block_cnt;
2524 struct hci_chan *chan;
2525 struct sk_buff *skb;
2526 int quote;
2527 u8 type;
2529 __check_timeout(hdev, cnt);
2531 BT_DBG("%s", hdev->name);
2533 if (hdev->dev_type == HCI_AMP)
2534 type = AMP_LINK;
2535 else
2536 type = ACL_LINK;
2538 while (hdev->block_cnt > 0 &&
2539 (chan = hci_chan_sent(hdev, type, &quote))) {
2540 u32 priority = (skb_peek(&chan->data_q))->priority;
2541 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2542 int blocks;
2544 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2545 skb->len, skb->priority);
2547 /* Stop if priority has changed */
2548 if (skb->priority < priority)
2549 break;
2551 skb = skb_dequeue(&chan->data_q);
2553 blocks = __get_blocks(hdev, skb);
2554 if (blocks > hdev->block_cnt)
2555 return;
2557 hci_conn_enter_active_mode(chan->conn,
2558 bt_cb(skb)->force_active);
2560 hci_send_frame(skb);
2561 hdev->acl_last_tx = jiffies;
2563 hdev->block_cnt -= blocks;
2564 quote -= blocks;
2566 chan->sent += blocks;
2567 chan->conn->sent += blocks;
2568 }
2569 }
2571 if (cnt != hdev->block_cnt)
2572 hci_prio_recalculate(hdev, type);
2573 }
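2574 /* Pick the ACL scheduler that matches the controller's flow control mode. */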
2575 static void hci_sched_acl(struct hci_dev *hdev)
2576 {
2577 BT_DBG("%s", hdev->name);
2579 /* No ACL link over BR/EDR controller */
2580 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2581 return;
2583 /* No AMP link over AMP controller */
2584 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
2585 return;
2587 switch (hdev->flow_ctl_mode) {
2588 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2589 hci_sched_acl_pkt(hdev);
2590 break;
2592 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2593 hci_sched_acl_blk(hdev);
2594 break;
2595 }
2596 }
2598 /* Schedule SCO */
2599 static void hci_sched_sco(struct hci_dev *hdev)
2600 {
2601 struct hci_conn *conn;
2602 struct sk_buff *skb;
2603 int quote;
2605 BT_DBG("%s", hdev->name);
2607 if (!hci_conn_num(hdev, SCO_LINK))
2608 return;
2610 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2611 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2612 BT_DBG("skb %p len %d", skb, skb->len);
2613 hci_send_frame(skb);
2615 conn->sent++;
2616 if (conn->sent == ~0)
2617 conn->sent = 0;
2618 }
2619 }
2620 }
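2621 /* Schedule eSCO */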
2622 static void hci_sched_esco(struct hci_dev *hdev)
2623 {
2624 struct hci_conn *conn;
2625 struct sk_buff *skb;
2626 int quote;
2628 BT_DBG("%s", hdev->name);
2630 if (!hci_conn_num(hdev, ESCO_LINK))
2631 return;
2633 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2634 &quote))) {
2635 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2636 BT_DBG("skb %p len %d", skb, skb->len);
2637 hci_send_frame(skb);
2639 conn->sent++;
2640 if (conn->sent == ~0)
2641 conn->sent = 0;
2642 }
2643 }
2644 }
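2645 /* Schedule LE data, falling back to the ACL buffer pool when the controller has no dedicated LE buffers. */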
2646 static void hci_sched_le(struct hci_dev *hdev)
2647 {
2648 struct hci_chan *chan;
2649 struct sk_buff *skb;
2650 int quote, cnt, tmp;
2652 BT_DBG("%s", hdev->name);
2654 if (!hci_conn_num(hdev, LE_LINK))
2655 return;
2657 if (!test_bit(HCI_RAW, &hdev->flags)) {
2658 /* LE tx timeout must be longer than maximum
2659 * link supervision timeout (40.9 seconds) */
2660 if (!hdev->le_cnt && hdev->le_pkts &&
2661 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2662 hci_link_tx_to(hdev, LE_LINK);
2663 }
2665 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2666 tmp = cnt;
2667 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2668 u32 priority = (skb_peek(&chan->data_q))->priority;
2669 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2670 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2671 skb->len, skb->priority);
2673 /* Stop if priority has changed */
2674 if (skb->priority < priority)
2675 break;
2677 skb = skb_dequeue(&chan->data_q);
2679 hci_send_frame(skb);
2680 hdev->le_last_tx = jiffies;
2682 cnt--;
2683 chan->sent++;
2684 chan->conn->sent++;
2685 }
2686 }
2688 if (hdev->le_pkts)
2689 hdev->le_cnt = cnt;
2690 else
2691 hdev->acl_cnt = cnt;
2693 if (cnt != tmp)
2694 hci_prio_recalculate(hdev, LE_LINK);
2695 }
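2696 /* TX work: run the per-link schedulers and flush queued raw packets. */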
2697 static void hci_tx_work(struct work_struct *work)
2698 {
2699 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2700 struct sk_buff *skb;
2702 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2703 hdev->sco_cnt, hdev->le_cnt);
2705 /* Schedule queues and send stuff to HCI driver */
2707 hci_sched_acl(hdev);
2709 hci_sched_sco(hdev);
2711 hci_sched_esco(hdev);
2713 hci_sched_le(hdev);
2715 /* Send next queued raw (unknown type) packet */
2716 while ((skb = skb_dequeue(&hdev->raw_q)))
2717 hci_send_frame(skb);
2718 }
2720 /* ----- HCI RX task (incoming data processing) ----- */
2722 /* ACL data packet */
2723 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2724 {
2725 struct hci_acl_hdr *hdr = (void *) skb->data;
2726 struct hci_conn *conn;
2727 __u16 handle, flags;
2729 skb_pull(skb, HCI_ACL_HDR_SIZE);
2731 handle = __le16_to_cpu(hdr->handle);
2732 flags = hci_flags(handle);
2733 handle = hci_handle(handle);
2735 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2736 handle, flags);
2738 hdev->stat.acl_rx++;
2740 hci_dev_lock(hdev);
2741 conn = hci_conn_hash_lookup_handle(hdev, handle);
2742 hci_dev_unlock(hdev);
2744 if (conn) {
2745 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2747 hci_dev_lock(hdev);
2748 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2749 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2750 mgmt_device_connected(hdev, &conn->dst, conn->type,
2751 conn->dst_type, 0, NULL, 0,
2752 conn->dev_class);
2753 hci_dev_unlock(hdev);
2755 /* Send to upper protocol */
2756 l2cap_recv_acldata(conn, skb, flags);
2757 return;
2758 } else {
2759 BT_ERR("%s ACL packet for unknown connection handle %d",
2760 hdev->name, handle);
2761 }
2763 kfree_skb(skb);
2764 }
2766 /* SCO data packet */
2767 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2768 {
2769 struct hci_sco_hdr *hdr = (void *) skb->data;
2770 struct hci_conn *conn;
2771 __u16 handle;
2773 skb_pull(skb, HCI_SCO_HDR_SIZE);
2775 handle = __le16_to_cpu(hdr->handle);
2777 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2779 hdev->stat.sco_rx++;
2781 hci_dev_lock(hdev);
2782 conn = hci_conn_hash_lookup_handle(hdev, handle);
2783 hci_dev_unlock(hdev);
2785 if (conn) {
2786 /* Send to upper protocol */
2787 sco_recv_scodata(conn, skb);
2788 return;
2789 } else {
2790 BT_ERR("%s SCO packet for unknown connection handle %d",
2791 hdev->name, handle);
2792 }
2794 kfree_skb(skb);
2795 }
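2796 /* RX work: dispatch packets queued by the driver to the event, ACL and SCO handlers. */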
2797 static void hci_rx_work(struct work_struct *work)
2798 {
2799 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2800 struct sk_buff *skb;
2802 BT_DBG("%s", hdev->name);
2804 while ((skb = skb_dequeue(&hdev->rx_q))) {
2805 /* Send copy to monitor */
2806 hci_send_to_monitor(hdev, skb);
2808 if (atomic_read(&hdev->promisc)) {
2809 /* Send copy to the sockets */
2810 hci_send_to_sock(hdev, skb);
2811 }
2813 if (test_bit(HCI_RAW, &hdev->flags)) {
2814 kfree_skb(skb);
2815 continue;
2816 }
2818 if (test_bit(HCI_INIT, &hdev->flags)) {
2819 /* Don't process data packets in this state. */
2820 switch (bt_cb(skb)->pkt_type) {
2821 case HCI_ACLDATA_PKT:
2822 case HCI_SCODATA_PKT:
2823 kfree_skb(skb);
2824 continue;
2825 }
2826 }
2828 /* Process frame */
2829 switch (bt_cb(skb)->pkt_type) {
2830 case HCI_EVENT_PKT:
2831 BT_DBG("%s Event packet", hdev->name);
2832 hci_event_packet(hdev, skb);
2833 break;
2835 case HCI_ACLDATA_PKT:
2836 BT_DBG("%s ACL data packet", hdev->name);
2837 hci_acldata_packet(hdev, skb);
2838 break;
2840 case HCI_SCODATA_PKT:
2841 BT_DBG("%s SCO data packet", hdev->name);
2842 hci_scodata_packet(hdev, skb);
2843 break;
2845 default:
2846 kfree_skb(skb);
2847 break;
2848 }
2849 }
2850 }
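2851 /* Command work: send the next queued HCI command once the controller can accept one. */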
2852 static void hci_cmd_work(struct work_struct *work)
2853 {
2854 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2855 struct sk_buff *skb;
2857 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2858 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2860 /* Send queued commands */
2861 if (atomic_read(&hdev->cmd_cnt)) {
2862 skb = skb_dequeue(&hdev->cmd_q);
2863 if (!skb)
2864 return;
2866 kfree_skb(hdev->sent_cmd);
2868 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2869 if (hdev->sent_cmd) {
2870 atomic_dec(&hdev->cmd_cnt);
2871 hci_send_frame(skb);
2872 if (test_bit(HCI_RESET, &hdev->flags))
2873 del_timer(&hdev->cmd_timer);
2874 else
2875 mod_timer(&hdev->cmd_timer,
2876 jiffies + HCI_CMD_TIMEOUT);
2877 } else {
2878 skb_queue_head(&hdev->cmd_q, skb);
2879 queue_work(hdev->workqueue, &hdev->cmd_work);
2880 }
2881 }
2882 }
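2883 /* Start a general inquiry (GIAC) unless one is already in progress. */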
2884 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2885 {
2886 /* General inquiry access code (GIAC) */
2887 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2888 struct hci_cp_inquiry cp;
2890 BT_DBG("%s", hdev->name);
2892 if (test_bit(HCI_INQUIRY, &hdev->flags))
2893 return -EINPROGRESS;
2895 inquiry_cache_flush(hdev);
2897 memset(&cp, 0, sizeof(cp));
2898 memcpy(&cp.lap, lap, sizeof(cp.lap));
2899 cp.length = length;
2901 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2902 }
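2903 /* Cancel an ongoing inquiry. */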
2904 int hci_cancel_inquiry(struct hci_dev *hdev)
2905 {
2906 BT_DBG("%s", hdev->name);
2908 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2909 return -EALREADY;
2911 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2912 }
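2913 /* Map a bdaddr type to the corresponding internal LE address type. */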
2914 u8 bdaddr_to_le(u8 bdaddr_type)
2915 {
2916 switch (bdaddr_type) {
2917 case BDADDR_LE_PUBLIC:
2918 return ADDR_LE_DEV_PUBLIC;
2920 default:
2921 /* Fall back to LE Random address type */
2922 return ADDR_LE_DEV_RANDOM;
2923 }
2924 }