Bluetooth: Add store_hint parameter to mgmt_new_key
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
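
/*
 * Editorial sketch (not part of the original file): the request helpers
 * below are driven through hci_request(). For example, enabling page and
 * inquiry scan could look like this, reusing the HCI_INIT_TIMEOUT used
 * elsewhere in this file:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * hci_request() serializes on hdev->req_lock, and __hci_request() sleeps
 * until hci_req_complete() marks the request HCI_REQ_DONE or the timeout
 * expires.
 */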
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we use a buffer with
	 * 255 entries. */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
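
/*
 * Usage sketch (editorial, from userspace): hci_inquiry() backs the
 * HCIINQUIRY ioctl on an HCI socket. A caller fills a struct
 * hci_inquiry_req followed by room for the responses, roughly:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };	 (GIAC LAP)
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 *
 * On return, ir.num_rsp holds how many inquiry_info entries were copied
 * out of the inquiry cache; hci_sock_fd is a hypothetical raw HCI socket.
 */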
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Stop timer, it might be running */
	del_timer_sync(&hdev->cmd_timer);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
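
/*
 * Worked example (editorial): with Secure Simple Pairing, a combination
 * key negotiated while both sides requested general bonding
 * (conn->auth_type == 0x04 and conn->remote_auth == 0x04) passes the
 * "> 0x01 on both sides" test above and is stored. The same key created
 * with both sides at no-bonding (0x00 or 0x01) falls through every test
 * and is dropped once it has been reported to userspace.
 */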
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
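
/*
 * Editorial note: the persistent flag computed by hci_persistent_key()
 * doubles as the store hint passed to mgmt_new_key(), telling userspace
 * whether the key is worth writing to permanent storage. When it is 0
 * the kernel also drops its own copy right away, so the key is never
 * found again by hci_find_link_key().
 */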
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
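
/*
 * Driver-side sketch (editorial, modeled on the simplest transport
 * drivers): a transport registers itself roughly like this; the
 * my_open/my_close/my_send/my_destruct callbacks are hypothetical.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_VIRTUAL;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *	hdev->destruct = my_destruct;
 *	hdev->owner = THIS_MODULE;
 *
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -EBUSY;
 *	}
 *
 * hci_register_dev() assigns the first free hciX id, schedules the
 * power_on work and returns that id on success.
 */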
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
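
/*
 * Usage sketch (editorial): a transport that already knows the packet
 * type but receives the payload in arbitrary chunks can hand the raw
 * bytes straight to the reassembler; buf and len are hypothetical
 * driver variables:
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *
 * Completed frames are forwarded to hci_recv_frame() internally; the
 * call returns a negative errno on failure and 0 once all bytes have
 * been consumed.
 */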
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
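
/*
 * Editorial note: by contrast with hci_recv_fragment() above, the stream
 * variant suits transports such as H4 UARTs where the packet type
 * indicator is part of the byte stream itself. The first byte of every
 * frame selects the type, and the single STREAM_REASSEMBLY slot tracks
 * the frame in progress:
 *
 *	hci_recv_stream_fragment(hdev, buf, len);	(buf/len hypothetical)
 */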
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
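
/*
 * Usage sketch (editorial): callers pass the opcode plus an optional
 * parameter block, exactly as the init requests earlier in this file do:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * The command is only queued here; hci_cmd_task() below actually hands
 * it to the driver once the controller has a free command credit
 * (hdev->cmd_cnt).
 */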
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
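
/*
 * Editorial note: L2CAP builds a frag_list skb when a PDU exceeds the
 * controller's ACL MTU. hci_send_acl() then queues the head fragment
 * with ACL_START and every continuation with ACL_CONT under the queue
 * lock, so fragments of one PDU always stay contiguous in the
 * connection's data queue.
 */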
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
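
/*
 * Worked example (editorial): with hdev->acl_cnt == 8 free ACL buffers
 * and three ACL connections that all have queued data, num == 3 and the
 * least-busy connection gets quote = 8 / 3 = 2 packets this pass; the
 * "q ? q : 1" fallback guarantees progress even when free buffers are
 * scarcer than connections.
 */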
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}