/* Linux 2.6.12-rc2: net/bluetooth/hci_core.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static struct notifier_block *hci_notifier;

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&hci_notifier, nb);
}

void hci_notify(struct hci_dev *hdev, int event)
{
	notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
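
/* Synchronous request machinery: __hci_request() marks the request
 * HCI_REQ_PEND, fires the req() callback (which only queues HCI
 * commands) and sleeps on req_wait_q for at most `timeout` jiffies.
 * The event path ends the wait through hci_req_complete() or
 * hci_req_cancel() above, which store the result and flip the status
 * to HCI_REQ_DONE or HCI_REQ_CANCELED. */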

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__u16 param;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		skb->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = __cpu_to_le16(0xffff);
		cp.sco_max_pkt = __cpu_to_le16(0xffff);
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	{
		struct hci_cp_set_event_flt cp;
		cp.flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
	}

	/* Page timeout ~20 secs */
	param = __cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = __cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}
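
/* The *_req helpers below run as the req() callback of hci_request():
 * they only queue HCI commands and return; the caller sleeps in
 * __hci_request() until the matching Command Complete event reaches
 * hci_req_complete(). The timeout parameters above are in baseband
 * slots of 0.625 ms, so 0x8000 is roughly 20.5 seconds. */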

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_dev_get);

/* ---- Inquiry support ---- */
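
/* The inquiry cache is a singly linked list of inquiry_entry records,
 * one per discovered bdaddr, protected by the device lock. Entries are
 * refreshed in place by hci_inquiry_cache_update(), and the whole list
 * is dropped when a new inquiry starts or the cache has grown stale. */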

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		memset(e, 0, sizeof(struct inquiry_entry));
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}
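
/* HCIINQUIRY ioctl: re-run the inquiry only when the cache is empty,
 * stale, or the caller asked for IREQ_CACHE_FLUSH; then dump the cache
 * into a temporary kernel buffer (the dump runs under the BH device
 * lock and must not sleep) and copy the results to user space. */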

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * 2 * HZ;
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* For an unlimited number of responses we will use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
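
/* Teardown order matters below: the RX and TX tasklets are killed
 * before the controller is reset, but the command tasklet stays alive
 * until the reset request has gone out; only then are the queues
 * drained and the driver's close() callback invoked. */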
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HZ/4);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req,
					dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;
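
	/* HCISETACLMTU/HCISETSCOMTU pack two 16-bit values into the
	 * 32-bit dev_opt: the first halfword in memory holds the packet
	 * count and the second the MTU, hence the __u16 pointer
	 * arithmetic below. */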
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	memset(hdev, 0, sizeof(struct hci_dev));

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via class release */
	class_device_put(&hdev->class_dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
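/* Device ids are assigned by walking the (sorted) device list for the
 * first unused index; inserting after the last consecutive entry keeps
 * the list ordered, so freed ids are reused by later registrations. */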
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);

	__hci_dev_put(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		do_gettimeofday(&skb->stamp);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
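
/* Commands are not handed to the driver directly: hci_send_cmd()
 * below only queues the frame on cmd_q and kicks the command tasklet,
 * which pushes it through hci_send_frame() once the controller's
 * command window (cmd_cnt) allows. */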

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	skb->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
	hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = __cpu_to_le16(len);

	skb->h.raw = (void *) hdr;
}
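
/* A PDU larger than the ACL MTU arrives here already split by the
 * caller, with the continuation fragments chained on frag_list. The
 * head is tagged ACL_START and every fragment ACL_CONT, and the whole
 * chain is spliced onto the connection queue under the queue lock so
 * fragments of different PDUs cannot interleave. */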
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			skb->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = __cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
	memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
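/* Pick the connection of the given type that has sent the least so
 * far (a simple fairness policy) and grant it a quota: the free
 * controller buffers divided evenly among the ready connections,
 * with a minimum of one packet. */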
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
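
/* ACL flow control: acl_cnt mirrors the number of free buffers the
 * controller advertised via Read Buffer Size. It is decremented for
 * every frame handed to the driver and replenished from the event
 * path as Number of Completed Packets events arrive; if it stays at
 * zero past the link supervision timeout, the stalled connections
 * are torn down above. */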

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
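
/* The TX tasklet drains the queues in a fixed order: ACL first, then
 * SCO, then raw frames of unknown type, which bypass the
 * per-connection scheduler entirely. */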

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (skb->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (skb->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
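
/* cmd_cnt works as the controller's command window: it is decremented
 * each time a command goes out and restored from the event path when
 * the controller signals completion. A clone of the outgoing command
 * is kept in sent_cmd so hci_sent_cmd_data() can recover its
 * parameters while handling the matching event. If the window stays
 * closed for more than a second, it is forced back open below. */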
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}