net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
48 #include <linux/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
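/* Delay (in ms) before a controller that was powered on automatically is
 * powered back off again if nothing claims it (see hci_power_on). */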
54 #define AUTO_OFF_TIMEOUT 2000
56 static void hci_rx_work(struct work_struct *work);
57 static void hci_cmd_work(struct work_struct *work);
58 static void hci_tx_work(struct work_struct *work);
60 /* HCI device list */
61 LIST_HEAD(hci_dev_list);
62 DEFINE_RWLOCK(hci_dev_list_lock);
64 /* HCI callback list */
65 LIST_HEAD(hci_cb_list);
66 DEFINE_RWLOCK(hci_cb_list_lock);
68 /* ---- HCI notifications ---- */
70 static void hci_notify(struct hci_dev *hdev, int event)
72 hci_sock_dev_event(hdev, event);
75 /* ---- HCI requests ---- */
77 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
79 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81 /* If this is the init phase check if the completed command matches
82 * the last init command, and if not just return.
84 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
86 struct sk_buff *skb;
88 /* Some CSR based controllers generate a spontaneous
89 * reset complete event during init and any pending
90 * command will never be completed. In such a case we
91 * need to resend whatever was the last sent
92 * command.
95 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
96 return;
98 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
99 if (skb) {
100 skb_queue_head(&hdev->cmd_q, skb);
101 queue_work(hdev->workqueue, &hdev->cmd_work);
104 return;
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127 unsigned long opt, __u32 timeout)
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
132 BT_DBG("%s start", hdev->name);
134 hdev->req_status = HCI_REQ_PEND;
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
139 req(hdev, opt);
140 schedule_timeout(timeout);
142 remove_wait_queue(&hdev->req_wait_q, &wait);
144 if (signal_pending(current))
145 return -EINTR;
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
149 err = -bt_to_errno(hdev->req_result);
150 break;
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
156 default:
157 err = -ETIMEDOUT;
158 break;
161 hdev->req_status = hdev->req_result = 0;
163 BT_DBG("%s end: err %d", hdev->name, err);
165 return err;
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 unsigned long opt, __u32 timeout)
171 int ret;
173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
181 return ret;
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186 BT_DBG("%s %ld", hdev->name, opt);
188 /* Reset device */
189 set_bit(HCI_RESET, &hdev->flags);
190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
193 static void bredr_init(struct hci_dev *hdev)
195 struct hci_cp_delete_stored_link_key cp;
196 __le16 param;
197 __u8 flt_type;
199 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201 /* Mandatory initialization */
203 /* Reset */
204 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
205 set_bit(HCI_RESET, &hdev->flags);
206 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
209 /* Read Local Supported Features */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
212 /* Read Local Version */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
216 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
218 /* Read BD Address */
219 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221 /* Read Class of Device */
222 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224 /* Read Local Name */
225 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
227 /* Read Voice Setting */
228 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
230 /* Optional initialization */
232 /* Clear Event Filters */
233 flt_type = HCI_FLT_CLEAR_ALL;
234 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
236 /* Connection accept timeout ~20 secs */
237 param = cpu_to_le16(0x7d00);
238 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
240 bacpy(&cp.bdaddr, BDADDR_ANY);
241 cp.delete_all = 1;
242 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
245 static void amp_init(struct hci_dev *hdev)
247 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249 /* Reset */
250 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252 /* Read Local Version */
253 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
256 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
258 struct sk_buff *skb;
260 BT_DBG("%s %ld", hdev->name, opt);
262 /* Driver initialization */
264 /* Special commands */
265 while ((skb = skb_dequeue(&hdev->driver_init))) {
266 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
267 skb->dev = (void *) hdev;
269 skb_queue_tail(&hdev->cmd_q, skb);
270 queue_work(hdev->workqueue, &hdev->cmd_work);
272 skb_queue_purge(&hdev->driver_init);
274 switch (hdev->dev_type) {
275 case HCI_BREDR:
276 bredr_init(hdev);
277 break;
279 case HCI_AMP:
280 amp_init(hdev);
281 break;
283 default:
284 BT_ERR("Unknown device type %d", hdev->dev_type);
285 break;
290 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
292 BT_DBG("%s", hdev->name);
294 /* Read LE buffer size */
295 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
298 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300 __u8 scan = opt;
302 BT_DBG("%s %x", hdev->name, scan);
304 /* Inquiry and Page scans */
305 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
308 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310 __u8 auth = opt;
312 BT_DBG("%s %x", hdev->name, auth);
314 /* Authentication */
315 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
318 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320 __u8 encrypt = opt;
322 BT_DBG("%s %x", hdev->name, encrypt);
324 /* Encryption */
325 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
328 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330 __le16 policy = cpu_to_le16(opt);
332 BT_DBG("%s %x", hdev->name, policy);
334 /* Default link policy */
335 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
338 /* Get HCI device by index.
339 * Device is held on return. */
340 struct hci_dev *hci_dev_get(int index)
342 struct hci_dev *hdev = NULL, *d;
344 BT_DBG("%d", index);
346 if (index < 0)
347 return NULL;
349 read_lock(&hci_dev_list_lock);
350 list_for_each_entry(d, &hci_dev_list, list) {
351 if (d->id == index) {
352 hdev = hci_dev_hold(d);
353 break;
356 read_unlock(&hci_dev_list_lock);
357 return hdev;
360 /* ---- Inquiry support ---- */
362 bool hci_discovery_active(struct hci_dev *hdev)
364 struct discovery_state *discov = &hdev->discovery;
366 switch (discov->state) {
367 case DISCOVERY_FINDING:
368 case DISCOVERY_RESOLVING:
369 return true;
371 default:
372 return false;
376 void hci_discovery_set_state(struct hci_dev *hdev, int state)
378 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
380 if (hdev->discovery.state == state)
381 return;
383 switch (state) {
384 case DISCOVERY_STOPPED:
385 if (hdev->discovery.state != DISCOVERY_STARTING)
386 mgmt_discovering(hdev, 0);
387 hdev->discovery.type = 0;
388 break;
389 case DISCOVERY_STARTING:
390 break;
391 case DISCOVERY_FINDING:
392 mgmt_discovering(hdev, 1);
393 break;
394 case DISCOVERY_RESOLVING:
395 break;
396 case DISCOVERY_STOPPING:
397 break;
400 hdev->discovery.state = state;
403 static void inquiry_cache_flush(struct hci_dev *hdev)
405 struct discovery_state *cache = &hdev->discovery;
406 struct inquiry_entry *p, *n;
408 list_for_each_entry_safe(p, n, &cache->all, all) {
409 list_del(&p->all);
410 kfree(p);
413 INIT_LIST_HEAD(&cache->unknown);
414 INIT_LIST_HEAD(&cache->resolve);
417 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
419 struct discovery_state *cache = &hdev->discovery;
420 struct inquiry_entry *e;
422 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424 list_for_each_entry(e, &cache->all, all) {
425 if (!bacmp(&e->data.bdaddr, bdaddr))
426 return e;
429 return NULL;
432 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
433 bdaddr_t *bdaddr)
435 struct discovery_state *cache = &hdev->discovery;
436 struct inquiry_entry *e;
438 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
440 list_for_each_entry(e, &cache->unknown, list) {
441 if (!bacmp(&e->data.bdaddr, bdaddr))
442 return e;
445 return NULL;
448 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
449 bdaddr_t *bdaddr,
450 int state)
452 struct discovery_state *cache = &hdev->discovery;
453 struct inquiry_entry *e;
455 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
457 list_for_each_entry(e, &cache->resolve, list) {
458 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
459 return e;
460 if (!bacmp(&e->data.bdaddr, bdaddr))
461 return e;
464 return NULL;
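/* Reposition 'ie' within the resolve list so that name resolution is
 * attempted for the strongest signals (smallest |RSSI|) first; entries
 * whose resolution is already pending keep their place ahead of it. */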
467 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
468 struct inquiry_entry *ie)
470 struct discovery_state *cache = &hdev->discovery;
471 struct list_head *pos = &cache->resolve;
472 struct inquiry_entry *p;
474 list_del(&ie->list);
476 list_for_each_entry(p, &cache->resolve, list) {
477 if (p->name_state != NAME_PENDING &&
478 abs(p->data.rssi) >= abs(ie->data.rssi))
479 break;
480 pos = &p->list;
483 list_add(&ie->list, pos);
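/* Add a new inquiry result to the cache or refresh an existing entry;
 * returns true if the remote device's name is already known. */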
486 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
487 bool name_known, bool *ssp)
489 struct discovery_state *cache = &hdev->discovery;
490 struct inquiry_entry *ie;
492 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
494 if (ssp)
495 *ssp = data->ssp_mode;
497 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
498 if (ie) {
499 if (ie->data.ssp_mode && ssp)
500 *ssp = true;
502 if (ie->name_state == NAME_NEEDED &&
503 data->rssi != ie->data.rssi) {
504 ie->data.rssi = data->rssi;
505 hci_inquiry_cache_update_resolve(hdev, ie);
508 goto update;
511 /* Entry not in the cache. Add new one. */
512 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
513 if (!ie)
514 return false;
516 list_add(&ie->all, &cache->all);
518 if (name_known) {
519 ie->name_state = NAME_KNOWN;
520 } else {
521 ie->name_state = NAME_NOT_KNOWN;
522 list_add(&ie->list, &cache->unknown);
525 update:
526 if (name_known && ie->name_state != NAME_KNOWN &&
527 ie->name_state != NAME_PENDING) {
528 ie->name_state = NAME_KNOWN;
529 list_del(&ie->list);
532 memcpy(&ie->data, data, sizeof(*data));
533 ie->timestamp = jiffies;
534 cache->timestamp = jiffies;
536 if (ie->name_state == NAME_NOT_KNOWN)
537 return false;
539 return true;
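/* Copy up to 'num' cached inquiry results into 'buf' as struct inquiry_info
 * records and return how many were copied. */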
542 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
544 struct discovery_state *cache = &hdev->discovery;
545 struct inquiry_info *info = (struct inquiry_info *) buf;
546 struct inquiry_entry *e;
547 int copied = 0;
549 list_for_each_entry(e, &cache->all, all) {
550 struct inquiry_data *data = &e->data;
552 if (copied >= num)
553 break;
555 bacpy(&info->bdaddr, &data->bdaddr);
556 info->pscan_rep_mode = data->pscan_rep_mode;
557 info->pscan_period_mode = data->pscan_period_mode;
558 info->pscan_mode = data->pscan_mode;
559 memcpy(info->dev_class, data->dev_class, 3);
560 info->clock_offset = data->clock_offset;
562 info++;
563 copied++;
566 BT_DBG("cache %p, copied %d", cache, copied);
567 return copied;
570 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
572 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
573 struct hci_cp_inquiry cp;
575 BT_DBG("%s", hdev->name);
577 if (test_bit(HCI_INQUIRY, &hdev->flags))
578 return;
580 /* Start Inquiry */
581 memcpy(&cp.lap, &ir->lap, 3);
582 cp.length = ir->length;
583 cp.num_rsp = ir->num_rsp;
584 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
587 int hci_inquiry(void __user *arg)
589 __u8 __user *ptr = arg;
590 struct hci_inquiry_req ir;
591 struct hci_dev *hdev;
592 int err = 0, do_inquiry = 0, max_rsp;
593 long timeo;
594 __u8 *buf;
596 if (copy_from_user(&ir, ptr, sizeof(ir)))
597 return -EFAULT;
599 hdev = hci_dev_get(ir.dev_id);
600 if (!hdev)
601 return -ENODEV;
603 hci_dev_lock(hdev);
604 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
605 inquiry_cache_empty(hdev) ||
606 ir.flags & IREQ_CACHE_FLUSH) {
607 inquiry_cache_flush(hdev);
608 do_inquiry = 1;
610 hci_dev_unlock(hdev);
612 timeo = ir.length * msecs_to_jiffies(2000);
614 if (do_inquiry) {
615 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
616 if (err < 0)
617 goto done;
620 /* For an unlimited number of responses, use a buffer with 255 entries */
621 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
623 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
624 * copy it to the user space.
626 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
627 if (!buf) {
628 err = -ENOMEM;
629 goto done;
632 hci_dev_lock(hdev);
633 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
634 hci_dev_unlock(hdev);
636 BT_DBG("num_rsp %d", ir.num_rsp);
638 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
639 ptr += sizeof(ir);
640 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
641 ir.num_rsp))
642 err = -EFAULT;
643 } else
644 err = -EFAULT;
646 kfree(buf);
648 done:
649 hci_dev_put(hdev);
650 return err;
653 /* ---- HCI ioctl helpers ---- */
655 int hci_dev_open(__u16 dev)
657 struct hci_dev *hdev;
658 int ret = 0;
660 hdev = hci_dev_get(dev);
661 if (!hdev)
662 return -ENODEV;
664 BT_DBG("%s %p", hdev->name, hdev);
666 hci_req_lock(hdev);
668 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
669 ret = -ENODEV;
670 goto done;
673 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
674 ret = -ERFKILL;
675 goto done;
678 if (test_bit(HCI_UP, &hdev->flags)) {
679 ret = -EALREADY;
680 goto done;
683 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
684 set_bit(HCI_RAW, &hdev->flags);
686 /* Treat all non BR/EDR controllers as raw devices if
687 enable_hs is not set */
688 if (hdev->dev_type != HCI_BREDR && !enable_hs)
689 set_bit(HCI_RAW, &hdev->flags);
691 if (hdev->open(hdev)) {
692 ret = -EIO;
693 goto done;
696 if (!test_bit(HCI_RAW, &hdev->flags)) {
697 atomic_set(&hdev->cmd_cnt, 1);
698 set_bit(HCI_INIT, &hdev->flags);
699 hdev->init_last_cmd = 0;
701 ret = __hci_request(hdev, hci_init_req, 0,
702 msecs_to_jiffies(HCI_INIT_TIMEOUT));
704 if (lmp_host_le_capable(hdev))
705 ret = __hci_request(hdev, hci_le_init_req, 0,
706 msecs_to_jiffies(HCI_INIT_TIMEOUT));
708 clear_bit(HCI_INIT, &hdev->flags);
711 if (!ret) {
712 hci_dev_hold(hdev);
713 set_bit(HCI_UP, &hdev->flags);
714 hci_notify(hdev, HCI_DEV_UP);
715 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
716 hci_dev_lock(hdev);
717 mgmt_powered(hdev, 1);
718 hci_dev_unlock(hdev);
720 } else {
721 /* Init failed, cleanup */
722 flush_work(&hdev->tx_work);
723 flush_work(&hdev->cmd_work);
724 flush_work(&hdev->rx_work);
726 skb_queue_purge(&hdev->cmd_q);
727 skb_queue_purge(&hdev->rx_q);
729 if (hdev->flush)
730 hdev->flush(hdev);
732 if (hdev->sent_cmd) {
733 kfree_skb(hdev->sent_cmd);
734 hdev->sent_cmd = NULL;
737 hdev->close(hdev);
738 hdev->flags = 0;
741 done:
742 hci_req_unlock(hdev);
743 hci_dev_put(hdev);
744 return ret;
747 static int hci_dev_do_close(struct hci_dev *hdev)
749 BT_DBG("%s %p", hdev->name, hdev);
751 cancel_work_sync(&hdev->le_scan);
753 hci_req_cancel(hdev, ENODEV);
754 hci_req_lock(hdev);
756 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
757 del_timer_sync(&hdev->cmd_timer);
758 hci_req_unlock(hdev);
759 return 0;
762 /* Flush RX and TX works */
763 flush_work(&hdev->tx_work);
764 flush_work(&hdev->rx_work);
766 if (hdev->discov_timeout > 0) {
767 cancel_delayed_work(&hdev->discov_off);
768 hdev->discov_timeout = 0;
769 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
772 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
773 cancel_delayed_work(&hdev->service_cache);
775 cancel_delayed_work_sync(&hdev->le_scan_disable);
777 hci_dev_lock(hdev);
778 inquiry_cache_flush(hdev);
779 hci_conn_hash_flush(hdev);
780 hci_dev_unlock(hdev);
782 hci_notify(hdev, HCI_DEV_DOWN);
784 if (hdev->flush)
785 hdev->flush(hdev);
787 /* Reset device */
788 skb_queue_purge(&hdev->cmd_q);
789 atomic_set(&hdev->cmd_cnt, 1);
790 if (!test_bit(HCI_RAW, &hdev->flags) &&
791 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
792 set_bit(HCI_INIT, &hdev->flags);
793 __hci_request(hdev, hci_reset_req, 0,
794 msecs_to_jiffies(250));
795 clear_bit(HCI_INIT, &hdev->flags);
798 /* flush cmd work */
799 flush_work(&hdev->cmd_work);
801 /* Drop queues */
802 skb_queue_purge(&hdev->rx_q);
803 skb_queue_purge(&hdev->cmd_q);
804 skb_queue_purge(&hdev->raw_q);
806 /* Drop last sent command */
807 if (hdev->sent_cmd) {
808 del_timer_sync(&hdev->cmd_timer);
809 kfree_skb(hdev->sent_cmd);
810 hdev->sent_cmd = NULL;
813 /* After this point our queues are empty
814 * and no tasks are scheduled. */
815 hdev->close(hdev);
817 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
818 hci_dev_lock(hdev);
819 mgmt_powered(hdev, 0);
820 hci_dev_unlock(hdev);
823 /* Clear flags */
824 hdev->flags = 0;
826 memset(hdev->eir, 0, sizeof(hdev->eir));
827 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
829 hci_req_unlock(hdev);
831 hci_dev_put(hdev);
832 return 0;
835 int hci_dev_close(__u16 dev)
837 struct hci_dev *hdev;
838 int err;
840 hdev = hci_dev_get(dev);
841 if (!hdev)
842 return -ENODEV;
844 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
845 cancel_delayed_work(&hdev->power_off);
847 err = hci_dev_do_close(hdev);
849 hci_dev_put(hdev);
850 return err;
853 int hci_dev_reset(__u16 dev)
855 struct hci_dev *hdev;
856 int ret = 0;
858 hdev = hci_dev_get(dev);
859 if (!hdev)
860 return -ENODEV;
862 hci_req_lock(hdev);
864 if (!test_bit(HCI_UP, &hdev->flags))
865 goto done;
867 /* Drop queues */
868 skb_queue_purge(&hdev->rx_q);
869 skb_queue_purge(&hdev->cmd_q);
871 hci_dev_lock(hdev);
872 inquiry_cache_flush(hdev);
873 hci_conn_hash_flush(hdev);
874 hci_dev_unlock(hdev);
876 if (hdev->flush)
877 hdev->flush(hdev);
879 atomic_set(&hdev->cmd_cnt, 1);
880 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
882 if (!test_bit(HCI_RAW, &hdev->flags))
883 ret = __hci_request(hdev, hci_reset_req, 0,
884 msecs_to_jiffies(HCI_INIT_TIMEOUT));
886 done:
887 hci_req_unlock(hdev);
888 hci_dev_put(hdev);
889 return ret;
892 int hci_dev_reset_stat(__u16 dev)
894 struct hci_dev *hdev;
895 int ret = 0;
897 hdev = hci_dev_get(dev);
898 if (!hdev)
899 return -ENODEV;
901 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
903 hci_dev_put(hdev);
905 return ret;
908 int hci_dev_cmd(unsigned int cmd, void __user *arg)
910 struct hci_dev *hdev;
911 struct hci_dev_req dr;
912 int err = 0;
914 if (copy_from_user(&dr, arg, sizeof(dr)))
915 return -EFAULT;
917 hdev = hci_dev_get(dr.dev_id);
918 if (!hdev)
919 return -ENODEV;
921 switch (cmd) {
922 case HCISETAUTH:
923 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
924 msecs_to_jiffies(HCI_INIT_TIMEOUT));
925 break;
927 case HCISETENCRYPT:
928 if (!lmp_encrypt_capable(hdev)) {
929 err = -EOPNOTSUPP;
930 break;
933 if (!test_bit(HCI_AUTH, &hdev->flags)) {
934 /* Auth must be enabled first */
935 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
936 msecs_to_jiffies(HCI_INIT_TIMEOUT));
937 if (err)
938 break;
941 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
942 msecs_to_jiffies(HCI_INIT_TIMEOUT));
943 break;
945 case HCISETSCAN:
946 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
947 msecs_to_jiffies(HCI_INIT_TIMEOUT));
948 break;
950 case HCISETLINKPOL:
951 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
952 msecs_to_jiffies(HCI_INIT_TIMEOUT));
953 break;
955 case HCISETLINKMODE:
956 hdev->link_mode = ((__u16) dr.dev_opt) &
957 (HCI_LM_MASTER | HCI_LM_ACCEPT);
958 break;
960 case HCISETPTYPE:
961 hdev->pkt_type = (__u16) dr.dev_opt;
962 break;
964 case HCISETACLMTU:
965 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
966 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
967 break;
969 case HCISETSCOMTU:
970 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
971 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
972 break;
974 default:
975 err = -EINVAL;
976 break;
979 hci_dev_put(hdev);
980 return err;
983 int hci_get_dev_list(void __user *arg)
985 struct hci_dev *hdev;
986 struct hci_dev_list_req *dl;
987 struct hci_dev_req *dr;
988 int n = 0, size, err;
989 __u16 dev_num;
991 if (get_user(dev_num, (__u16 __user *) arg))
992 return -EFAULT;
994 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
995 return -EINVAL;
997 size = sizeof(*dl) + dev_num * sizeof(*dr);
999 dl = kzalloc(size, GFP_KERNEL);
1000 if (!dl)
1001 return -ENOMEM;
1003 dr = dl->dev_req;
1005 read_lock(&hci_dev_list_lock);
1006 list_for_each_entry(hdev, &hci_dev_list, list) {
1007 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1008 cancel_delayed_work(&hdev->power_off);
1010 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1011 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1013 (dr + n)->dev_id = hdev->id;
1014 (dr + n)->dev_opt = hdev->flags;
1016 if (++n >= dev_num)
1017 break;
1019 read_unlock(&hci_dev_list_lock);
1021 dl->dev_num = n;
1022 size = sizeof(*dl) + n * sizeof(*dr);
1024 err = copy_to_user(arg, dl, size);
1025 kfree(dl);
1027 return err ? -EFAULT : 0;
1030 int hci_get_dev_info(void __user *arg)
1032 struct hci_dev *hdev;
1033 struct hci_dev_info di;
1034 int err = 0;
1036 if (copy_from_user(&di, arg, sizeof(di)))
1037 return -EFAULT;
1039 hdev = hci_dev_get(di.dev_id);
1040 if (!hdev)
1041 return -ENODEV;
1043 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1044 cancel_delayed_work_sync(&hdev->power_off);
1046 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1047 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1049 strcpy(di.name, hdev->name);
1050 di.bdaddr = hdev->bdaddr;
1051 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1052 di.flags = hdev->flags;
1053 di.pkt_type = hdev->pkt_type;
1054 di.acl_mtu = hdev->acl_mtu;
1055 di.acl_pkts = hdev->acl_pkts;
1056 di.sco_mtu = hdev->sco_mtu;
1057 di.sco_pkts = hdev->sco_pkts;
1058 di.link_policy = hdev->link_policy;
1059 di.link_mode = hdev->link_mode;
1061 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1062 memcpy(&di.features, &hdev->features, sizeof(di.features));
1064 if (copy_to_user(arg, &di, sizeof(di)))
1065 err = -EFAULT;
1067 hci_dev_put(hdev);
1069 return err;
1072 /* ---- Interface to HCI drivers ---- */
1074 static int hci_rfkill_set_block(void *data, bool blocked)
1076 struct hci_dev *hdev = data;
1078 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1080 if (!blocked)
1081 return 0;
1083 hci_dev_do_close(hdev);
1085 return 0;
1088 static const struct rfkill_ops hci_rfkill_ops = {
1089 .set_block = hci_rfkill_set_block,
1092 /* Alloc HCI device */
1093 struct hci_dev *hci_alloc_dev(void)
1095 struct hci_dev *hdev;
1097 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1098 if (!hdev)
1099 return NULL;
1101 hci_init_sysfs(hdev);
1102 skb_queue_head_init(&hdev->driver_init);
1104 return hdev;
1106 EXPORT_SYMBOL(hci_alloc_dev);
1108 /* Free HCI device */
1109 void hci_free_dev(struct hci_dev *hdev)
1111 skb_queue_purge(&hdev->driver_init);
1113 /* will free via device release */
1114 put_device(&hdev->dev);
1116 EXPORT_SYMBOL(hci_free_dev);
1118 static void hci_power_on(struct work_struct *work)
1120 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1122 BT_DBG("%s", hdev->name);
1124 if (hci_dev_open(hdev->id) < 0)
1125 return;
1127 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1128 schedule_delayed_work(&hdev->power_off,
1129 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1131 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1132 mgmt_index_added(hdev);
1135 static void hci_power_off(struct work_struct *work)
1137 struct hci_dev *hdev = container_of(work, struct hci_dev,
1138 power_off.work);
1140 BT_DBG("%s", hdev->name);
1142 hci_dev_do_close(hdev);
1145 static void hci_discov_off(struct work_struct *work)
1147 struct hci_dev *hdev;
1148 u8 scan = SCAN_PAGE;
1150 hdev = container_of(work, struct hci_dev, discov_off.work);
1152 BT_DBG("%s", hdev->name);
1154 hci_dev_lock(hdev);
1156 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1158 hdev->discov_timeout = 0;
1160 hci_dev_unlock(hdev);
1163 int hci_uuids_clear(struct hci_dev *hdev)
1165 struct list_head *p, *n;
1167 list_for_each_safe(p, n, &hdev->uuids) {
1168 struct bt_uuid *uuid;
1170 uuid = list_entry(p, struct bt_uuid, list);
1172 list_del(p);
1173 kfree(uuid);
1176 return 0;
1179 int hci_link_keys_clear(struct hci_dev *hdev)
1181 struct list_head *p, *n;
1183 list_for_each_safe(p, n, &hdev->link_keys) {
1184 struct link_key *key;
1186 key = list_entry(p, struct link_key, list);
1188 list_del(p);
1189 kfree(key);
1192 return 0;
1195 int hci_smp_ltks_clear(struct hci_dev *hdev)
1197 struct smp_ltk *k, *tmp;
1199 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1200 list_del(&k->list);
1201 kfree(k);
1204 return 0;
1207 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1209 struct link_key *k;
1211 list_for_each_entry(k, &hdev->link_keys, list)
1212 if (bacmp(bdaddr, &k->bdaddr) == 0)
1213 return k;
1215 return NULL;
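/* Decide whether a link key should be stored persistently or discarded
 * once the connection goes down. */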
1218 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1219 u8 key_type, u8 old_key_type)
1221 /* Legacy key */
1222 if (key_type < 0x03)
1223 return true;
1225 /* Debug keys are insecure so don't store them persistently */
1226 if (key_type == HCI_LK_DEBUG_COMBINATION)
1227 return false;
1229 /* Changed combination key and there's no previous one */
1230 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1231 return false;
1233 /* Security mode 3 case */
1234 if (!conn)
1235 return true;
1237 /* Neither local nor remote side had no-bonding as requirement */
1238 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1239 return true;
1241 /* Local side had dedicated bonding as requirement */
1242 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1243 return true;
1245 /* Remote side had dedicated bonding as requirement */
1246 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1247 return true;
1249 /* If none of the above criteria match, then don't store the key
1250 * persistently */
1251 return false;
1254 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1256 struct smp_ltk *k;
1258 list_for_each_entry(k, &hdev->long_term_keys, list) {
1259 if (k->ediv != ediv ||
1260 memcmp(rand, k->rand, sizeof(k->rand)))
1261 continue;
1263 return k;
1266 return NULL;
1268 EXPORT_SYMBOL(hci_find_ltk);
1270 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1271 u8 addr_type)
1273 struct smp_ltk *k;
1275 list_for_each_entry(k, &hdev->long_term_keys, list)
1276 if (addr_type == k->bdaddr_type &&
1277 bacmp(bdaddr, &k->bdaddr) == 0)
1278 return k;
1280 return NULL;
1282 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1284 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1285 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1287 struct link_key *key, *old_key;
1288 u8 old_key_type;
1289 bool persistent;
1291 old_key = hci_find_link_key(hdev, bdaddr);
1292 if (old_key) {
1293 old_key_type = old_key->type;
1294 key = old_key;
1295 } else {
1296 old_key_type = conn ? conn->key_type : 0xff;
1297 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1298 if (!key)
1299 return -ENOMEM;
1300 list_add(&key->list, &hdev->link_keys);
1303 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1305 /* Some buggy controller combinations generate a changed
1306 * combination key for legacy pairing even when there's no
1307 * previous key */
1308 if (type == HCI_LK_CHANGED_COMBINATION &&
1309 (!conn || conn->remote_auth == 0xff) &&
1310 old_key_type == 0xff) {
1311 type = HCI_LK_COMBINATION;
1312 if (conn)
1313 conn->key_type = type;
1316 bacpy(&key->bdaddr, bdaddr);
1317 memcpy(key->val, val, 16);
1318 key->pin_len = pin_len;
1320 if (type == HCI_LK_CHANGED_COMBINATION)
1321 key->type = old_key_type;
1322 else
1323 key->type = type;
1325 if (!new_key)
1326 return 0;
1328 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1330 mgmt_new_link_key(hdev, key, persistent);
1332 if (conn)
1333 conn->flush_key = !persistent;
1335 return 0;
1338 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1339 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
1340 ediv, u8 rand[8])
1342 struct smp_ltk *key, *old_key;
1344 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1345 return 0;
1347 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1348 if (old_key)
1349 key = old_key;
1350 else {
1351 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1352 if (!key)
1353 return -ENOMEM;
1354 list_add(&key->list, &hdev->long_term_keys);
1357 bacpy(&key->bdaddr, bdaddr);
1358 key->bdaddr_type = addr_type;
1359 memcpy(key->val, tk, sizeof(key->val));
1360 key->authenticated = authenticated;
1361 key->ediv = ediv;
1362 key->enc_size = enc_size;
1363 key->type = type;
1364 memcpy(key->rand, rand, sizeof(key->rand));
1366 if (!new_key)
1367 return 0;
1369 if (type & HCI_SMP_LTK)
1370 mgmt_new_ltk(hdev, key, 1);
1372 return 0;
1375 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1377 struct link_key *key;
1379 key = hci_find_link_key(hdev, bdaddr);
1380 if (!key)
1381 return -ENOENT;
1383 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1385 list_del(&key->list);
1386 kfree(key);
1388 return 0;
1391 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1393 struct smp_ltk *k, *tmp;
1395 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1396 if (bacmp(bdaddr, &k->bdaddr))
1397 continue;
1399 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1401 list_del(&k->list);
1402 kfree(k);
1405 return 0;
1408 /* HCI command timer function */
1409 static void hci_cmd_timer(unsigned long arg)
1411 struct hci_dev *hdev = (void *) arg;
1413 BT_ERR("%s command tx timeout", hdev->name);
1414 atomic_set(&hdev->cmd_cnt, 1);
1415 queue_work(hdev->workqueue, &hdev->cmd_work);
1418 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1419 bdaddr_t *bdaddr)
1421 struct oob_data *data;
1423 list_for_each_entry(data, &hdev->remote_oob_data, list)
1424 if (bacmp(bdaddr, &data->bdaddr) == 0)
1425 return data;
1427 return NULL;
1430 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1432 struct oob_data *data;
1434 data = hci_find_remote_oob_data(hdev, bdaddr);
1435 if (!data)
1436 return -ENOENT;
1438 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1440 list_del(&data->list);
1441 kfree(data);
1443 return 0;
1446 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1448 struct oob_data *data, *n;
1450 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1451 list_del(&data->list);
1452 kfree(data);
1455 return 0;
1458 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1459 u8 *randomizer)
1461 struct oob_data *data;
1463 data = hci_find_remote_oob_data(hdev, bdaddr);
1465 if (!data) {
1466 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1467 if (!data)
1468 return -ENOMEM;
1470 bacpy(&data->bdaddr, bdaddr);
1471 list_add(&data->list, &hdev->remote_oob_data);
1474 memcpy(data->hash, hash, sizeof(data->hash));
1475 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1477 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1479 return 0;
1482 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1484 struct bdaddr_list *b;
1486 list_for_each_entry(b, &hdev->blacklist, list)
1487 if (bacmp(bdaddr, &b->bdaddr) == 0)
1488 return b;
1490 return NULL;
1493 int hci_blacklist_clear(struct hci_dev *hdev)
1495 struct list_head *p, *n;
1497 list_for_each_safe(p, n, &hdev->blacklist) {
1498 struct bdaddr_list *b;
1500 b = list_entry(p, struct bdaddr_list, list);
1502 list_del(p);
1503 kfree(b);
1506 return 0;
1509 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1511 struct bdaddr_list *entry;
1513 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1514 return -EBADF;
1516 if (hci_blacklist_lookup(hdev, bdaddr))
1517 return -EEXIST;
1519 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1520 if (!entry)
1521 return -ENOMEM;
1523 bacpy(&entry->bdaddr, bdaddr);
1525 list_add(&entry->list, &hdev->blacklist);
1527 return mgmt_device_blocked(hdev, bdaddr, type);
1530 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1532 struct bdaddr_list *entry;
1534 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1535 return hci_blacklist_clear(hdev);
1537 entry = hci_blacklist_lookup(hdev, bdaddr);
1538 if (!entry)
1539 return -ENOENT;
1541 list_del(&entry->list);
1542 kfree(entry);
1544 return mgmt_device_unblocked(hdev, bdaddr, type);
1547 static void hci_clear_adv_cache(struct work_struct *work)
1549 struct hci_dev *hdev = container_of(work, struct hci_dev,
1550 adv_work.work);
1552 hci_dev_lock(hdev);
1554 hci_adv_entries_clear(hdev);
1556 hci_dev_unlock(hdev);
1559 int hci_adv_entries_clear(struct hci_dev *hdev)
1561 struct adv_entry *entry, *tmp;
1563 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1564 list_del(&entry->list);
1565 kfree(entry);
1568 BT_DBG("%s adv cache cleared", hdev->name);
1570 return 0;
1573 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1575 struct adv_entry *entry;
1577 list_for_each_entry(entry, &hdev->adv_entries, list)
1578 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1579 return entry;
1581 return NULL;
1584 static inline int is_connectable_adv(u8 evt_type)
1586 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1587 return 1;
1589 return 0;
1592 int hci_add_adv_entry(struct hci_dev *hdev,
1593 struct hci_ev_le_advertising_info *ev)
{
struct adv_entry *entry;
if (!is_connectable_adv(ev->evt_type))
1594 return -EINVAL;
1596 /* Only new entries should be added to adv_entries. So, if
1597 * bdaddr was found, don't add it. */
1598 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1599 return 0;
1601 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1602 if (!entry)
1603 return -ENOMEM;
1605 bacpy(&entry->bdaddr, &ev->bdaddr);
1606 entry->bdaddr_type = ev->bdaddr_type;
1608 list_add(&entry->list, &hdev->adv_entries);
1610 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1611 batostr(&entry->bdaddr), entry->bdaddr_type);
1613 return 0;
1616 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1618 struct le_scan_params *param = (struct le_scan_params *) opt;
1619 struct hci_cp_le_set_scan_param cp;
1621 memset(&cp, 0, sizeof(cp));
1622 cp.type = param->type;
1623 cp.interval = cpu_to_le16(param->interval);
1624 cp.window = cpu_to_le16(param->window);
1626 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1629 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1631 struct hci_cp_le_set_scan_enable cp;
1633 memset(&cp, 0, sizeof(cp));
1634 cp.enable = 1;
1636 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
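/* Set the LE scan parameters, enable scanning and arm the delayed work
 * that disables the scan again after 'timeout' milliseconds. */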
1639 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1640 u16 window, int timeout)
1642 long timeo = msecs_to_jiffies(3000);
1643 struct le_scan_params param;
1644 int err;
1646 BT_DBG("%s", hdev->name);
1648 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1649 return -EINPROGRESS;
1651 param.type = type;
1652 param.interval = interval;
1653 param.window = window;
1655 hci_req_lock(hdev);
1657 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1658 timeo);
1659 if (!err)
1660 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1662 hci_req_unlock(hdev);
1664 if (err < 0)
1665 return err;
1667 schedule_delayed_work(&hdev->le_scan_disable,
1668 msecs_to_jiffies(timeout));
1670 return 0;
1673 static void le_scan_disable_work(struct work_struct *work)
1675 struct hci_dev *hdev = container_of(work, struct hci_dev,
1676 le_scan_disable.work);
1677 struct hci_cp_le_set_scan_enable cp;
1679 BT_DBG("%s", hdev->name);
1681 memset(&cp, 0, sizeof(cp));
1683 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1686 static void le_scan_work(struct work_struct *work)
1688 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1689 struct le_scan_params *param = &hdev->le_scan_params;
1691 BT_DBG("%s", hdev->name);
1693 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1694 param->timeout);
1697 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1698 int timeout)
1700 struct le_scan_params *param = &hdev->le_scan_params;
1702 BT_DBG("%s", hdev->name);
1704 if (work_busy(&hdev->le_scan))
1705 return -EINPROGRESS;
1707 param->type = type;
1708 param->interval = interval;
1709 param->window = window;
1710 param->timeout = timeout;
1712 queue_work(system_long_wq, &hdev->le_scan);
1714 return 0;
1717 /* Register HCI device */
1718 int hci_register_dev(struct hci_dev *hdev)
1720 struct list_head *head = &hci_dev_list, *p;
1721 int i, id, error;
1723 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1725 if (!hdev->open || !hdev->close)
1726 return -EINVAL;
1728 /* Do not allow HCI_AMP devices to register at index 0,
1729 * so the index can be used as the AMP controller ID.
1731 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1733 write_lock(&hci_dev_list_lock);
1735 /* Find first available device id */
1736 list_for_each(p, &hci_dev_list) {
1737 if (list_entry(p, struct hci_dev, list)->id != id)
1738 break;
1739 head = p; id++;
1742 sprintf(hdev->name, "hci%d", id);
1743 hdev->id = id;
1744 list_add_tail(&hdev->list, head);
1746 mutex_init(&hdev->lock);
1748 hdev->flags = 0;
1749 hdev->dev_flags = 0;
1750 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1751 hdev->esco_type = (ESCO_HV1);
1752 hdev->link_mode = (HCI_LM_ACCEPT);
1753 hdev->io_capability = 0x03; /* No Input No Output */
1755 hdev->idle_timeout = 0;
1756 hdev->sniff_max_interval = 800;
1757 hdev->sniff_min_interval = 80;
1759 INIT_WORK(&hdev->rx_work, hci_rx_work);
1760 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1761 INIT_WORK(&hdev->tx_work, hci_tx_work);
1764 skb_queue_head_init(&hdev->rx_q);
1765 skb_queue_head_init(&hdev->cmd_q);
1766 skb_queue_head_init(&hdev->raw_q);
1768 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1770 for (i = 0; i < NUM_REASSEMBLY; i++)
1771 hdev->reassembly[i] = NULL;
1773 init_waitqueue_head(&hdev->req_wait_q);
1774 mutex_init(&hdev->req_lock);
1776 discovery_init(hdev);
1778 hci_conn_hash_init(hdev);
1780 INIT_LIST_HEAD(&hdev->mgmt_pending);
1782 INIT_LIST_HEAD(&hdev->blacklist);
1784 INIT_LIST_HEAD(&hdev->uuids);
1786 INIT_LIST_HEAD(&hdev->link_keys);
1787 INIT_LIST_HEAD(&hdev->long_term_keys);
1789 INIT_LIST_HEAD(&hdev->remote_oob_data);
1791 INIT_LIST_HEAD(&hdev->adv_entries);
1793 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1794 INIT_WORK(&hdev->power_on, hci_power_on);
1795 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1797 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1799 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1801 atomic_set(&hdev->promisc, 0);
1803 INIT_WORK(&hdev->le_scan, le_scan_work);
1805 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1807 write_unlock(&hci_dev_list_lock);
1809 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1810 WQ_MEM_RECLAIM, 1);
1811 if (!hdev->workqueue) {
1812 error = -ENOMEM;
1813 goto err;
1816 error = hci_add_sysfs(hdev);
1817 if (error < 0)
1818 goto err_wqueue;
1820 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1821 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1822 if (hdev->rfkill) {
1823 if (rfkill_register(hdev->rfkill) < 0) {
1824 rfkill_destroy(hdev->rfkill);
1825 hdev->rfkill = NULL;
1829 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1830 set_bit(HCI_SETUP, &hdev->dev_flags);
1831 schedule_work(&hdev->power_on);
1833 hci_notify(hdev, HCI_DEV_REG);
1834 hci_dev_hold(hdev);
1836 return id;
1838 err_wqueue:
1839 destroy_workqueue(hdev->workqueue);
1840 err:
1841 write_lock(&hci_dev_list_lock);
1842 list_del(&hdev->list);
1843 write_unlock(&hci_dev_list_lock);
1845 return error;
1847 EXPORT_SYMBOL(hci_register_dev);
1849 /* Unregister HCI device */
1850 void hci_unregister_dev(struct hci_dev *hdev)
1852 int i;
1854 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1856 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1858 write_lock(&hci_dev_list_lock);
1859 list_del(&hdev->list);
1860 write_unlock(&hci_dev_list_lock);
1862 hci_dev_do_close(hdev);
1864 for (i = 0; i < NUM_REASSEMBLY; i++)
1865 kfree_skb(hdev->reassembly[i]);
1867 if (!test_bit(HCI_INIT, &hdev->flags) &&
1868 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1869 hci_dev_lock(hdev);
1870 mgmt_index_removed(hdev);
1871 hci_dev_unlock(hdev);
1874 /* mgmt_index_removed should take care of emptying the
1875 * pending list */
1876 BUG_ON(!list_empty(&hdev->mgmt_pending));
1878 hci_notify(hdev, HCI_DEV_UNREG);
1880 if (hdev->rfkill) {
1881 rfkill_unregister(hdev->rfkill);
1882 rfkill_destroy(hdev->rfkill);
1885 hci_del_sysfs(hdev);
1887 cancel_delayed_work_sync(&hdev->adv_work);
1889 destroy_workqueue(hdev->workqueue);
1891 hci_dev_lock(hdev);
1892 hci_blacklist_clear(hdev);
1893 hci_uuids_clear(hdev);
1894 hci_link_keys_clear(hdev);
1895 hci_smp_ltks_clear(hdev);
1896 hci_remote_oob_data_clear(hdev);
1897 hci_adv_entries_clear(hdev);
1898 hci_dev_unlock(hdev);
1900 hci_dev_put(hdev);
1902 EXPORT_SYMBOL(hci_unregister_dev);
1904 /* Suspend HCI device */
1905 int hci_suspend_dev(struct hci_dev *hdev)
1907 hci_notify(hdev, HCI_DEV_SUSPEND);
1908 return 0;
1910 EXPORT_SYMBOL(hci_suspend_dev);
1912 /* Resume HCI device */
1913 int hci_resume_dev(struct hci_dev *hdev)
1915 hci_notify(hdev, HCI_DEV_RESUME);
1916 return 0;
1918 EXPORT_SYMBOL(hci_resume_dev);
1920 /* Receive frame from HCI drivers */
1921 int hci_recv_frame(struct sk_buff *skb)
1923 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1924 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1925 && !test_bit(HCI_INIT, &hdev->flags))) {
1926 kfree_skb(skb);
1927 return -ENXIO;
1930 /* Incoming skb */
1931 bt_cb(skb)->incoming = 1;
1933 /* Time stamp */
1934 __net_timestamp(skb);
1936 skb_queue_tail(&hdev->rx_q, skb);
1937 queue_work(hdev->workqueue, &hdev->rx_work);
1939 return 0;
1941 EXPORT_SYMBOL(hci_recv_frame);
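/* Feed driver data into the per-type reassembly buffer; once a complete
 * frame has been collected it is passed to hci_recv_frame(). Returns the
 * number of unconsumed input bytes, or a negative error. */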
1943 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1944 int count, __u8 index)
1946 int len = 0;
1947 int hlen = 0;
1948 int remain = count;
1949 struct sk_buff *skb;
1950 struct bt_skb_cb *scb;
1952 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1953 index >= NUM_REASSEMBLY)
1954 return -EILSEQ;
1956 skb = hdev->reassembly[index];
1958 if (!skb) {
1959 switch (type) {
1960 case HCI_ACLDATA_PKT:
1961 len = HCI_MAX_FRAME_SIZE;
1962 hlen = HCI_ACL_HDR_SIZE;
1963 break;
1964 case HCI_EVENT_PKT:
1965 len = HCI_MAX_EVENT_SIZE;
1966 hlen = HCI_EVENT_HDR_SIZE;
1967 break;
1968 case HCI_SCODATA_PKT:
1969 len = HCI_MAX_SCO_SIZE;
1970 hlen = HCI_SCO_HDR_SIZE;
1971 break;
1974 skb = bt_skb_alloc(len, GFP_ATOMIC);
1975 if (!skb)
1976 return -ENOMEM;
1978 scb = (void *) skb->cb;
1979 scb->expect = hlen;
1980 scb->pkt_type = type;
1982 skb->dev = (void *) hdev;
1983 hdev->reassembly[index] = skb;
1986 while (count) {
1987 scb = (void *) skb->cb;
1988 len = min_t(uint, scb->expect, count);
1990 memcpy(skb_put(skb, len), data, len);
1992 count -= len;
1993 data += len;
1994 scb->expect -= len;
1995 remain = count;
1997 switch (type) {
1998 case HCI_EVENT_PKT:
1999 if (skb->len == HCI_EVENT_HDR_SIZE) {
2000 struct hci_event_hdr *h = hci_event_hdr(skb);
2001 scb->expect = h->plen;
2003 if (skb_tailroom(skb) < scb->expect) {
2004 kfree_skb(skb);
2005 hdev->reassembly[index] = NULL;
2006 return -ENOMEM;
2009 break;
2011 case HCI_ACLDATA_PKT:
2012 if (skb->len == HCI_ACL_HDR_SIZE) {
2013 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2014 scb->expect = __le16_to_cpu(h->dlen);
2016 if (skb_tailroom(skb) < scb->expect) {
2017 kfree_skb(skb);
2018 hdev->reassembly[index] = NULL;
2019 return -ENOMEM;
2022 break;
2024 case HCI_SCODATA_PKT:
2025 if (skb->len == HCI_SCO_HDR_SIZE) {
2026 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2027 scb->expect = h->dlen;
2029 if (skb_tailroom(skb) < scb->expect) {
2030 kfree_skb(skb);
2031 hdev->reassembly[index] = NULL;
2032 return -ENOMEM;
2035 break;
2038 if (scb->expect == 0) {
2039 /* Complete frame */
2041 bt_cb(skb)->pkt_type = type;
2042 hci_recv_frame(skb);
2044 hdev->reassembly[index] = NULL;
2045 return remain;
2049 return remain;
2052 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2054 int rem = 0;
2056 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2057 return -EILSEQ;
2059 while (count) {
2060 rem = hci_reassembly(hdev, type, data, count, type - 1);
2061 if (rem < 0)
2062 return rem;
2064 data += (count - rem);
2065 count = rem;
2068 return rem;
2070 EXPORT_SYMBOL(hci_recv_fragment);
2072 #define STREAM_REASSEMBLY 0
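/* Reassembly for byte-stream transports (e.g. UART based drivers): the
 * first byte of every frame carries the packet type, the remainder is
 * handed to hci_reassembly(). */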
2074 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2076 int type;
2077 int rem = 0;
2079 while (count) {
2080 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2082 if (!skb) {
2083 struct { char type; } *pkt;
2085 /* Start of the frame */
2086 pkt = data;
2087 type = pkt->type;
2089 data++;
2090 count--;
2091 } else
2092 type = bt_cb(skb)->pkt_type;
2094 rem = hci_reassembly(hdev, type, data, count,
2095 STREAM_REASSEMBLY);
2096 if (rem < 0)
2097 return rem;
2099 data += (count - rem);
2100 count = rem;
2103 return rem;
2105 EXPORT_SYMBOL(hci_recv_stream_fragment);
2107 /* ---- Interface to upper protocols ---- */
2109 int hci_register_cb(struct hci_cb *cb)
2111 BT_DBG("%p name %s", cb, cb->name);
2113 write_lock(&hci_cb_list_lock);
2114 list_add(&cb->list, &hci_cb_list);
2115 write_unlock(&hci_cb_list_lock);
2117 return 0;
2119 EXPORT_SYMBOL(hci_register_cb);
2121 int hci_unregister_cb(struct hci_cb *cb)
2123 BT_DBG("%p name %s", cb, cb->name);
2125 write_lock(&hci_cb_list_lock);
2126 list_del(&cb->list);
2127 write_unlock(&hci_cb_list_lock);
2129 return 0;
2131 EXPORT_SYMBOL(hci_unregister_cb);
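/* Hand a single frame to the driver: timestamp it, send copies to the
 * monitor channel and (in promiscuous mode) to raw sockets, then call
 * hdev->send(). */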
2133 static int hci_send_frame(struct sk_buff *skb)
2135 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2137 if (!hdev) {
2138 kfree_skb(skb);
2139 return -ENODEV;
2142 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2144 /* Time stamp */
2145 __net_timestamp(skb);
2147 /* Send copy to monitor */
2148 hci_send_to_monitor(hdev, skb);
2150 if (atomic_read(&hdev->promisc)) {
2151 /* Send copy to the sockets */
2152 hci_send_to_sock(hdev, skb);
2155 /* Get rid of skb owner, prior to sending to the driver. */
2156 skb_orphan(skb);
2158 return hdev->send(skb);
2161 /* Send HCI command */
2162 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2164 int len = HCI_COMMAND_HDR_SIZE + plen;
2165 struct hci_command_hdr *hdr;
2166 struct sk_buff *skb;
2168 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2170 skb = bt_skb_alloc(len, GFP_ATOMIC);
2171 if (!skb) {
2172 BT_ERR("%s no memory for command", hdev->name);
2173 return -ENOMEM;
2176 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2177 hdr->opcode = cpu_to_le16(opcode);
2178 hdr->plen = plen;
2180 if (plen)
2181 memcpy(skb_put(skb, plen), param, plen);
2183 BT_DBG("skb len %d", skb->len);
2185 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2186 skb->dev = (void *) hdev;
2188 if (test_bit(HCI_INIT, &hdev->flags))
2189 hdev->init_last_cmd = opcode;
2191 skb_queue_tail(&hdev->cmd_q, skb);
2192 queue_work(hdev->workqueue, &hdev->cmd_work);
2194 return 0;
2197 /* Get data from the previously sent command */
2198 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2200 struct hci_command_hdr *hdr;
2202 if (!hdev->sent_cmd)
2203 return NULL;
2205 hdr = (void *) hdev->sent_cmd->data;
2207 if (hdr->opcode != cpu_to_le16(opcode))
2208 return NULL;
2210 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2212 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2215 /* Send ACL data */
2216 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2218 struct hci_acl_hdr *hdr;
2219 int len = skb->len;
2221 skb_push(skb, HCI_ACL_HDR_SIZE);
2222 skb_reset_transport_header(skb);
2223 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2224 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2225 hdr->dlen = cpu_to_le16(len);
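/* Queue an ACL frame for transmission; if it carries a frag_list, every
 * fragment gets its own ACL header, with ACL_START kept on the first
 * fragment and ACL_CONT set on the rest. */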
2228 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2229 struct sk_buff *skb, __u16 flags)
2231 struct hci_dev *hdev = conn->hdev;
2232 struct sk_buff *list;
2234 list = skb_shinfo(skb)->frag_list;
2235 if (!list) {
2236 /* Non fragmented */
2237 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2239 skb_queue_tail(queue, skb);
2240 } else {
2241 /* Fragmented */
2242 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2244 skb_shinfo(skb)->frag_list = NULL;
2246 /* Queue all fragments atomically */
2247 spin_lock(&queue->lock);
2249 __skb_queue_tail(queue, skb);
2251 flags &= ~ACL_START;
2252 flags |= ACL_CONT;
2253 do {
2254 skb = list; list = list->next;
2256 skb->dev = (void *) hdev;
2257 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2258 hci_add_acl_hdr(skb, conn->handle, flags);
2260 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2262 __skb_queue_tail(queue, skb);
2263 } while (list);
2265 spin_unlock(&queue->lock);
2269 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2271 struct hci_conn *conn = chan->conn;
2272 struct hci_dev *hdev = conn->hdev;
2274 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2276 skb->dev = (void *) hdev;
2277 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2278 hci_add_acl_hdr(skb, conn->handle, flags);
2280 hci_queue_acl(conn, &chan->data_q, skb, flags);
2282 queue_work(hdev->workqueue, &hdev->tx_work);
2284 EXPORT_SYMBOL(hci_send_acl);
2286 /* Send SCO data */
2287 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2289 struct hci_dev *hdev = conn->hdev;
2290 struct hci_sco_hdr hdr;
2292 BT_DBG("%s len %d", hdev->name, skb->len);
2294 hdr.handle = cpu_to_le16(conn->handle);
2295 hdr.dlen = skb->len;
2297 skb_push(skb, HCI_SCO_HDR_SIZE);
2298 skb_reset_transport_header(skb);
2299 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2301 skb->dev = (void *) hdev;
2302 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2304 skb_queue_tail(&conn->data_q, skb);
2305 queue_work(hdev->workqueue, &hdev->tx_work);
2307 EXPORT_SYMBOL(hci_send_sco);
2309 /* ---- HCI TX task (outgoing data) ---- */
2311 /* HCI Connection scheduler */
2312 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2314 struct hci_conn_hash *h = &hdev->conn_hash;
2315 struct hci_conn *conn = NULL, *c;
2316 int num = 0, min = ~0;
2318 /* We don't have to lock device here. Connections are always
2319 * added and removed with TX task disabled. */
2321 rcu_read_lock();
2323 list_for_each_entry_rcu(c, &h->list, list) {
2324 if (c->type != type || skb_queue_empty(&c->data_q))
2325 continue;
2327 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2328 continue;
2330 num++;
2332 if (c->sent < min) {
2333 min = c->sent;
2334 conn = c;
2337 if (hci_conn_num(hdev, type) == num)
2338 break;
2341 rcu_read_unlock();
2343 if (conn) {
2344 int cnt, q;
2346 switch (conn->type) {
2347 case ACL_LINK:
2348 cnt = hdev->acl_cnt;
2349 break;
2350 case SCO_LINK:
2351 case ESCO_LINK:
2352 cnt = hdev->sco_cnt;
2353 break;
2354 case LE_LINK:
2355 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2356 break;
2357 default:
2358 cnt = 0;
2359 BT_ERR("Unknown link type");
2362 q = cnt / num;
2363 *quote = q ? q : 1;
2364 } else
2365 *quote = 0;
2367 BT_DBG("conn %p quote %d", conn, *quote);
2368 return conn;
2371 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2373 struct hci_conn_hash *h = &hdev->conn_hash;
2374 struct hci_conn *c;
2376 BT_ERR("%s link tx timeout", hdev->name);
2378 rcu_read_lock();
2380 /* Kill stalled connections */
2381 list_for_each_entry_rcu(c, &h->list, list) {
2382 if (c->type == type && c->sent) {
2383 BT_ERR("%s killing stalled connection %s",
2384 hdev->name, batostr(&c->dst));
2385 hci_acl_disconn(c, 0x13);
2389 rcu_read_unlock();
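/* Channel scheduler: among connections of the given type, pick the channel
 * whose queued data has the highest priority and, within that priority, the
 * connection with the fewest packets in flight; *quote returns how many
 * packets it may send this round. */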
2392 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2393 int *quote)
2395 struct hci_conn_hash *h = &hdev->conn_hash;
2396 struct hci_chan *chan = NULL;
2397 int num = 0, min = ~0, cur_prio = 0;
2398 struct hci_conn *conn;
2399 int cnt, q, conn_num = 0;
2401 BT_DBG("%s", hdev->name);
2403 rcu_read_lock();
2405 list_for_each_entry_rcu(conn, &h->list, list) {
2406 struct hci_chan *tmp;
2408 if (conn->type != type)
2409 continue;
2411 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2412 continue;
2414 conn_num++;
2416 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2417 struct sk_buff *skb;
2419 if (skb_queue_empty(&tmp->data_q))
2420 continue;
2422 skb = skb_peek(&tmp->data_q);
2423 if (skb->priority < cur_prio)
2424 continue;
2426 if (skb->priority > cur_prio) {
2427 num = 0;
2428 min = ~0;
2429 cur_prio = skb->priority;
2432 num++;
2434 if (conn->sent < min) {
2435 min = conn->sent;
2436 chan = tmp;
2440 if (hci_conn_num(hdev, type) == conn_num)
2441 break;
2444 rcu_read_unlock();
2446 if (!chan)
2447 return NULL;
2449 switch (chan->conn->type) {
2450 case ACL_LINK:
2451 cnt = hdev->acl_cnt;
2452 break;
2453 case SCO_LINK:
2454 case ESCO_LINK:
2455 cnt = hdev->sco_cnt;
2456 break;
2457 case LE_LINK:
2458 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2459 break;
2460 default:
2461 cnt = 0;
2462 BT_ERR("Unknown link type");
2465 q = cnt / num;
2466 *quote = q ? q : 1;
2467 BT_DBG("chan %p quote %d", chan, *quote);
2468 return chan;
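/* Starvation avoidance: channels that had queued data but sent nothing in
 * the last round get their priority promoted so they are eventually
 * serviced. */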
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
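
/* Shared ACL stall check for both flow control modes: if no buffer credits
 * are left and the last ACL transmission is older than HCI_ACL_TX_TIMEOUT,
 * treat the link as stalled and kill the affected connections.
 */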
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
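
/* ACL scheduling for packet-based flow control: one buffer credit per frame,
 * consumed from hdev->acl_cnt.
 */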
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
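
/* ACL scheduling for block-based flow control: each frame consumes as many
 * data blocks as its payload needs, accounted against hdev->block_cnt.
 */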
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
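
/* Schedule ACL: dispatch to the packet- or block-based scheduler according
 * to hdev->flow_ctl_mode.
 */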
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
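
/* Schedule eSCO: same round-robin as SCO, drawing from the shared SCO
 * buffer count.
 */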
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
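
/* Schedule LE: controllers without a dedicated LE buffer pool (le_pkts == 0)
 * share the ACL buffers, so the consumed credits are written back to
 * le_cnt or acl_cnt accordingly.
 */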
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
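
/* TX work: run all per-link-type schedulers and then flush the raw queue. */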
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
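
/* RX work: drain hdev->rx_q, mirroring each frame to the monitor and, in
 * promiscuous mode, to the HCI sockets. In raw mode nothing further is
 * processed; during init data packets are discarded. Everything else is
 * dispatched by packet type.
 */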
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
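
/* Start a general inquiry (GIAC): flush the inquiry cache and issue
 * HCI_OP_INQUIRY, unless an inquiry is already in progress.
 */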
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}