/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */
int hci_register_notifier(struct notifier_block *nb)
	return atomic_notifier_chain_register(&hci_notifier, nb);

int hci_unregister_notifier(struct notifier_block *nb)
	return atomic_notifier_chain_unregister(&hci_notifier, nb);

static void hci_notify(struct hci_dev *hdev, int event)
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
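/*
 * Illustrative sketch (not part of the original file): another kernel module
 * can subscribe to the notifier chain above via hci_register_notifier().
 * The callback receives the HCI_DEV_* event code and the struct hci_dev
 * pointer that hci_notify() passes in; the names example_hci_event and
 * example_nb are hypothetical.
 */
static int example_hci_event(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct hci_dev *hdev = ptr;	/* hci_notify() hands us the device */

	if (event == HCI_DEV_UP)
		printk(KERN_INFO "example: %s is up\n", hdev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_hci_event,
};

/* Subscribe with hci_register_notifier(&example_nb) and drop the
 * subscription with hci_unregister_notifier(&example_nb) on module exit. */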
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);

static void hci_req_cancel(struct hci_dev *hdev, int err)
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
	DECLARE_WAITQUEUE(wait, current);

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))

	switch (hdev->req_status) {
		err = -bt_err(hdev->req_result);

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
	/* Serialize all requests */
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
	BT_DBG("%s %ld", hdev->name, opt);

	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);

	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

	/* Host buffer size */
	struct hci_cp_host_buffer_size cp;
	cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
	cp.sco_mtu = HCI_MAX_SCO_SIZE;
	cp.acl_max_pkt = __cpu_to_le16(0xffff);
	cp.sco_max_pkt = __cpu_to_le16(0xffff);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	struct hci_cp_set_event_flt cp;
	cp.flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);

	/* Page timeout ~20 secs */
	param = __cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = __cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
	BT_DBG("%s %x", hdev->name, auth);

	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
	BT_DBG("%s %x", hdev->name, encrypt);

	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
	struct hci_dev *hdev = NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);

	read_unlock(&hci_dev_list_lock);
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
		e->next = cache->list;

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

	BT_DBG("cache %p, copied %d", cache, copied);
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))

	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
int hci_inquiry(void __user *arg)
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;

	if (copy_from_user(&ir, ptr, sizeof(ir)))

	if (!(hdev = hci_dev_get(ir.dev_id)))

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
			inquiry_cache_empty(hdev) ||
			ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);

	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo)) < 0)

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space. */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
	struct hci_dev *hdev;

	if (!(hdev = hci_dev_get(dev)))

	BT_DBG("%s %p", hdev->name, hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);

	set_bit(HCI_UP, &hdev->flags);
	hci_notify(hdev, HCI_DEV_UP);

	/* Init failed, cleanup */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);
	tasklet_kill(&hdev->cmd_task);

	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->rx_q);

	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;

	hci_req_unlock(hdev);
static int hci_dev_do_close(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);

	tasklet_kill(&hdev->cmd_task);

	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */

	hci_req_unlock(hdev);
int hci_dev_close(__u16 dev)
	struct hci_dev *hdev;

	if (!(hdev = hci_dev_get(dev)))

	err = hci_dev_do_close(hdev);

int hci_dev_reset(__u16 dev)
	struct hci_dev *hdev;

	if (!(hdev = hci_dev_get(dev)))

	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))

	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);

int hci_dev_reset_stat(__u16 dev)
	struct hci_dev *hdev;

	if (!(hdev = hci_dev_get(dev)))

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
int hci_dev_cmd(unsigned int cmd, void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_req dr;

	if (copy_from_user(&dr, arg, sizeof(dr)))

	if (!(hdev = hci_dev_get(dr.dev_id)))

		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (!lmp_encrypt_capable(hdev)) {

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		hdev->pkt_type = (__u16) dr.dev_opt;

		hdev->link_policy = (__u16) dr.dev_opt;

		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);

		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);

		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
int hci_get_dev_list(void __user *arg)
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;

	if (get_user(dev_num, (__u16 __user *) arg))

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

	read_unlock_bh(&hci_dev_list_lock);

	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);

	return err ? -EFAULT : 0;
int hci_get_dev_info(void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_info di;

	if (copy_from_user(&di, arg, sizeof(di)))

	if (!(hdev = hci_dev_get(di.dev_id)))

	strcpy(di.name, hdev->name);
	di.bdaddr      = hdev->bdaddr;
	di.type        = hdev->type;
	di.flags       = hdev->flags;
	di.pkt_type    = hdev->pkt_type;
	di.acl_mtu     = hdev->acl_mtu;
	di.acl_pkts    = hdev->acl_pkts;
	di.sco_mtu     = hdev->sco_mtu;
	di.sco_pkts    = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);

	skb_queue_head_init(&hdev->driver_init);

EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);

EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
	struct list_head *head = &hci_dev_list, *p;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)

	sprintf(hdev->name, "hci%d", id);
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);

EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
	hci_notify(hdev, HCI_DEV_SUSPEND);

EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
	hci_notify(hdev, HCI_DEV_RESUME);

EXPORT_SYMBOL(hci_resume_dev);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;

	write_unlock_bh(&hci_task_lock);

EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;

	write_unlock_bh(&hci_task_lock);

EXPORT_SYMBOL(hci_unregister_proto);
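/*
 * Illustrative sketch (not part of the original file): an upper protocol
 * hooks into the hci_proto array above by filling a struct hci_proto and
 * calling hci_register_proto(). Only the fields this file actually uses
 * (name, id, recv_acldata) are shown, and the example names and exact field
 * types are assumptions based on how they are referenced here.
 */
static int example_recv_acldata(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	/* Consume the ACL payload; a real protocol would parse it. */
	kfree_skb(skb);
	return 0;
}

static struct hci_proto example_proto = {
	.name         = "example",
	.id           = HCI_PROTO_L2CAP,
	.recv_acldata = example_recv_acldata,
};

/* hci_register_proto(&example_proto); ... hci_unregister_proto(&example_proto); */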
int hci_register_cb(struct hci_cb *cb)
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	write_unlock_bh(&hci_cb_list_lock);

EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);

	/* Get rid of skb owner, prior to sending to the driver. */

	return hdev->send(skb);
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));

	memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
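/*
 * Illustrative sketch (not part of the original file): an event handler that
 * has just seen a Command Complete for the Inquiry command could recover the
 * parameters it originally queued like this:
 *
 *	struct hci_cp_inquiry *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_INQUIRY);
 *	if (cp)
 *		BT_DBG("inquiry was requested with length %u", cp->length);
 */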
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
	struct hci_acl_hdr *hdr;

	hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
	hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = __cpu_to_le16(len);

	skb->h.raw = (void *) hdr;
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);

		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);

		spin_unlock_bh(&conn->data_q.lock);

EXPORT_SYMBOL(hci_send_acl);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {

	hdr.handle = __cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
	memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);

EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))

		if (c->sent < min) {

	int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);

	BT_DBG("conn %p quote %d", conn, *quote);
static inline void hci_acl_tx_to(struct hci_dev *hdev)
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
					hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
static inline void hci_sched_acl(struct hci_dev *hdev)
	struct hci_conn *conn;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
			hci_acl_tx_to(hdev);

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;
static inline void hci_sched_sco(struct hci_dev *hdev)
	struct hci_conn *conn;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			if (conn->sent == ~0)
static void hci_tx_task(unsigned long arg)
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);

		BT_ERR("%s ACL packet for unknown connection handle %d",
				hdev->name, handle);
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);

		BT_ERR("%s SCO packet for unknown connection handle %d",
				hdev->name, handle);
static void hci_rx_task(unsigned long arg)
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);

		if (test_bit(HCI_RAW, &hdev->flags)) {

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:

		switch (bt_cb(skb)->pkt_type) {
			hci_event_packet(hdev, skb);

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);

	read_unlock(&hci_task_lock);
static void hci_cmd_task(unsigned long arg)
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;

			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
);