/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define AUTO_OFF_TIMEOUT 2000
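/* Time (in ms) that a controller brought up by hci_power_on() stays
 * powered before hci_auto_off() schedules the power_off work; see the
 * off_timer handling below. */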
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
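/* Note on the request machinery: __hci_request() fires the req
 * callback (which only queues HCI commands), then sleeps on req_wait_q
 * until hci_req_complete() or hci_req_cancel() flips req_status, or the
 * timeout expires. Callers must serialize with hci_req_lock(), which is
 * exactly what hci_request() below does. */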
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
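/* The init commands above are only queued here; hci_cmd_task() sends
 * them one at a time, and hci_req_complete() compares each reply
 * against init_last_cmd (recorded by hci_send_cmd() while HCI_INIT is
 * set) to decide when initialization is finished. */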
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
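/* Usage sketch (hypothetical caller): every reference taken here must
 * be dropped with hci_dev_put() when done, e.g.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */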
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
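/* The inquiry cache is a simple singly linked list: new entries are
 * pushed at the head, and an existing entry for the same bdaddr is
 * refreshed in place with the latest inquiry_data and timestamp. */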
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
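/* hci_dev_do_close() below is the common teardown path, shared by
 * hci_dev_close() and the rfkill block handler: it kills the tasklets,
 * drains every queue and resets the controller before hdev->close(). */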
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);
	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}
int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
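/* Command flow control: the controller permits a limited number of
 * outstanding commands, tracked in cmd_cnt (hci_cmd_task() decrements
 * it for every command sent). If no completion event arrives before
 * cmd_timer fires, the handler above forces cmd_cnt back to 1 so the
 * command queue cannot stall forever. */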
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
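/* Driver usage sketch (hypothetical): a transport driver hands a fully
 * framed packet to the core roughly like this:
 *
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(skb);
 *
 * or it can let the core reassemble partial data via
 * hci_recv_fragment() / hci_recv_stream_fragment() below. */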
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
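/* hci_reassembly() keeps one partially assembled packet per reassembly
 * slot: hci_recv_fragment() below uses one slot per packet type
 * (index type - 1), while hci_recv_stream_fragment() multiplexes
 * everything through slot STREAM_REASSEMBLY and takes the packet type
 * from the first byte of the stream. */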
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
							type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
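/* Usage sketch (hypothetical): a command with a one-byte parameter is
 * sent the same way hci_scan_req() does above, e.g.
 *
 *	__u8 scan = opt;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * The call only queues the command; completion comes back through the
 * event path and hci_req_complete(). */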
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
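/* Fragmented ACL writes: the first skb keeps the caller's boundary
 * flags (ACL_START), while every fragment taken from frag_list is
 * re-queued with ACL_START cleared so the controller treats it as a
 * continuation of the same higher-layer PDU. */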
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
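/* Scheduler fairness: hci_low_sent() picks, per link type, the ready
 * connection with the fewest packets in flight and grants it a quote
 * of cnt / num (at least 1), so one busy connection cannot starve the
 * others of controller buffers. */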
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
*hdev
)
1780 struct hci_conn
*conn
;
1781 struct sk_buff
*skb
;
1784 BT_DBG("%s", hdev
->name
);
1786 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
1787 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
1788 BT_DBG("skb %p len %d", skb
, skb
->len
);
1789 hci_send_frame(skb
);
1792 if (conn
->sent
== ~0)
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1922 static inline void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
1924 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
1925 struct hci_conn
*conn
;
1928 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
1930 handle
= __le16_to_cpu(hdr
->handle
);
1932 BT_DBG("%s len %d handle 0x%x", hdev
->name
, skb
->len
, handle
);
1934 hdev
->stat
.sco_rx
++;
1937 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
1938 hci_dev_unlock(hdev
);
1941 register struct hci_proto
*hp
;
1943 /* Send to upper protocol */
1944 hp
= hci_proto
[HCI_PROTO_SCO
];
1945 if (hp
&& hp
->recv_scodata
) {
1946 hp
->recv_scodata(conn
, skb
);
1950 BT_ERR("%s SCO packet for unknown connection handle %d",
1951 hdev
->name
, handle
);
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}