/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
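/*
 * Illustrative use of the notifier interface above (added sketch, not part
 * of the original file): another kernel module can watch HCI devices come
 * and go by registering a notifier_block.  The example_* names are
 * hypothetical.
 */
#if 0	/* example only */
static int example_hci_notify(struct notifier_block *nb, unsigned long event,
								void *ptr)
{
	struct hci_dev *hdev = ptr;

	if (event == HCI_DEV_REG)
		BT_DBG("hci device %s registered", hdev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_hci_nb = {
	.notifier_call = example_hci_notify,
};

/* module init: hci_register_notifier(&example_hci_nb);   */
/* module exit: hci_unregister_notifier(&example_hci_nb); */
#endif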
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
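/*
 * Added note (not in the original source): both timeout parameters above are
 * expressed in baseband slots of 0.625 ms, so 0x8000 = 32768 slots = 20.48 s
 * for the page timeout and 0x7d00 = 32000 slots = 20.00 s for the connection
 * accept timeout, which is where the "~20 secs" comments come from.
 */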
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;

	return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
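/*
 * Illustrative user-space counterpart of hci_inquiry() above (added sketch,
 * not part of the original file): the HCIINQUIRY ioctl expects a
 * struct hci_inquiry_req immediately followed by room for the returned
 * inquiry_info entries, matching the two copy_to_user() calls above.
 */
#if 0	/* example only, user-space code */
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info    info[255];
	} req = {
		.ir = {
			.dev_id  = 0,
			.length  = 8,				/* 8 * 1.28 s */
			.num_rsp = 255,
			.lap     = { 0x33, 0x8b, 0x9e },	/* GIAC */
		},
	};

	if (ioctl(hci_sock_fd, HCIINQUIRY, &req) < 0)
		perror("HCIINQUIRY");
#endif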
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);

	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	tasklet_kill(&hdev->cmd_task);

	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
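/*
 * Illustrative driver-side registration (added sketch, not part of the
 * original file): a transport driver allocates a device, fills in the
 * mandatory open/close/destruct callbacks checked above plus its send
 * routine, and registers it.  The example_* callbacks are hypothetical.
 */
#if 0	/* example only */
	struct hci_dev *hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus      = HCI_USB;
	hdev->open     = example_open;
	hdev->close    = example_close;
	hdev->send     = example_send;
	hdev->destruct = example_destruct;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return -ENODEV;
	}
#endif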
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	bt_cb(skb)->incoming = 1;

	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
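/*
 * Illustrative driver receive path (added sketch, not part of the original
 * file): a driver holding a complete HCI packet allocates an skb, tags it
 * with the owning device and packet type, and hands it to hci_recv_frame().
 * The pkt_data/pkt_len names are hypothetical.
 */
#if 0	/* example only */
	struct sk_buff *skb = bt_skb_alloc(pkt_len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, pkt_len), pkt_data, pkt_len);
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	hci_recv_frame(skb);
#endif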
/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
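/*
 * Illustrative use of hci_recv_fragment() (added sketch, not part of the
 * original file): a UART-style driver can push however many bytes just
 * arrived for a given packet type and let the reassembly above produce
 * complete frames.  The rx_buf/rx_len names are hypothetical.
 */
#if 0	/* example only */
	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, rx_buf, rx_len);
	if (err < 0)
		BT_ERR("%s frame reassembly failed (%d)", hdev->name, err);
#endif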
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
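/*
 * Illustrative upper-protocol registration (added sketch, not part of the
 * original file): a protocol such as L2CAP fills in a struct hci_proto with
 * its id and the callbacks it cares about, then registers it.  The
 * example_recv_acldata callback is hypothetical.
 */
#if 0	/* example only */
	static struct hci_proto example_proto = {
		.name		= "EXAMPLE",
		.id		= HCI_PROTO_L2CAP,
		.recv_acldata	= example_recv_acldata,
	};

	err = hci_register_proto(&example_proto);
#endif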
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1200 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
1202 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
1203 struct hci_command_hdr
*hdr
;
1204 struct sk_buff
*skb
;
1206 BT_DBG("%s opcode 0x%x plen %d", hdev
->name
, opcode
, plen
);
1208 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
1210 BT_ERR("%s no memory for command", hdev
->name
);
1214 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
1215 hdr
->opcode
= cpu_to_le16(opcode
);
1219 memcpy(skb_put(skb
, plen
), param
, plen
);
1221 BT_DBG("skb len %d", skb
->len
);
1223 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
1224 skb
->dev
= (void *) hdev
;
1226 skb_queue_tail(&hdev
->cmd_q
, skb
);
1227 tasklet_schedule(&hdev
->cmd_task
);
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
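/*
 * Added note (not in the original source): the ACL header built above packs
 * the 12-bit connection handle into the low bits and the 2-bit packet
 * boundary plus 2-bit broadcast flags into the top four bits, i.e.
 * hci_handle_pack(handle, flags) == (handle & 0x0fff) | (flags << 12).
 * hci_handle() and hci_flags() in hci_acldata_packet() below undo exactly
 * that split on the receive side.
 */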
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
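/*
 * Added note (not in the original source): the 0x13 passed to
 * hci_acl_disconn() in hci_acl_tx_to() above is the HCI reason code
 * "Remote User Terminated Connection", the usual reason when the host
 * tears a link down.
 */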
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
);