2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
46 #include <asm/system.h>
47 #include <asm/uaccess.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
/* Forward declarations for the three HCI tasklets and notification helper. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

/* Protects the hci_proto[] table against the running RX/TX/CMD tasks. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols (L2CAP and SCO upper layers) */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
75 /* ---- HCI notifications ---- */
77 int hci_register_notifier(struct notifier_block
*nb
)
79 return atomic_notifier_chain_register(&hci_notifier
, nb
);
82 int hci_unregister_notifier(struct notifier_block
*nb
)
84 return atomic_notifier_chain_unregister(&hci_notifier
, nb
);
87 static void hci_notify(struct hci_dev
*hdev
, int event
)
89 atomic_notifier_call_chain(&hci_notifier
, event
, hdev
);
92 /* ---- HCI requests ---- */
94 void hci_req_complete(struct hci_dev
*hdev
, int result
)
96 BT_DBG("%s result 0x%2.2x", hdev
->name
, result
);
98 if (hdev
->req_status
== HCI_REQ_PEND
) {
99 hdev
->req_result
= result
;
100 hdev
->req_status
= HCI_REQ_DONE
;
101 wake_up_interruptible(&hdev
->req_wait_q
);
105 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
107 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
109 if (hdev
->req_status
== HCI_REQ_PEND
) {
110 hdev
->req_result
= err
;
111 hdev
->req_status
= HCI_REQ_CANCELED
;
112 wake_up_interruptible(&hdev
->req_wait_q
);
116 /* Execute request and wait for completion. */
117 static int __hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
118 unsigned long opt
, __u32 timeout
)
120 DECLARE_WAITQUEUE(wait
, current
);
123 BT_DBG("%s start", hdev
->name
);
125 hdev
->req_status
= HCI_REQ_PEND
;
127 add_wait_queue(&hdev
->req_wait_q
, &wait
);
128 set_current_state(TASK_INTERRUPTIBLE
);
131 schedule_timeout(timeout
);
133 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
135 if (signal_pending(current
))
138 switch (hdev
->req_status
) {
140 err
= -bt_err(hdev
->req_result
);
143 case HCI_REQ_CANCELED
:
144 err
= -hdev
->req_result
;
152 hdev
->req_status
= hdev
->req_result
= 0;
154 BT_DBG("%s end: err %d", hdev
->name
, err
);
159 static inline int hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
160 unsigned long opt
, __u32 timeout
)
164 if (!test_bit(HCI_UP
, &hdev
->flags
))
167 /* Serialize all requests */
169 ret
= __hci_request(hdev
, req
, opt
, timeout
);
170 hci_req_unlock(hdev
);
175 static void hci_reset_req(struct hci_dev
*hdev
, unsigned long opt
)
177 BT_DBG("%s %ld", hdev
->name
, opt
);
180 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
183 static void hci_init_req(struct hci_dev
*hdev
, unsigned long opt
)
189 BT_DBG("%s %ld", hdev
->name
, opt
);
191 /* Driver initialization */
193 /* Special commands */
194 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
195 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
196 skb
->dev
= (void *) hdev
;
198 skb_queue_tail(&hdev
->cmd_q
, skb
);
199 tasklet_schedule(&hdev
->cmd_task
);
201 skb_queue_purge(&hdev
->driver_init
);
203 /* Mandatory initialization */
206 if (!test_bit(HCI_QUIRK_NO_RESET
, &hdev
->quirks
))
207 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
209 /* Read Local Supported Features */
210 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
212 /* Read Local Version */
213 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
215 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
216 hci_send_cmd(hdev
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
219 /* Read BD Address */
220 hci_send_cmd(hdev
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
222 /* Read Class of Device */
223 hci_send_cmd(hdev
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
225 /* Read Local Name */
226 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
228 /* Read Voice Setting */
229 hci_send_cmd(hdev
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
231 /* Optional initialization */
233 /* Clear Event Filters */
234 flt_type
= HCI_FLT_CLEAR_ALL
;
235 hci_send_cmd(hdev
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
237 /* Page timeout ~20 secs */
238 param
= cpu_to_le16(0x8000);
239 hci_send_cmd(hdev
, HCI_OP_WRITE_PG_TIMEOUT
, 2, ¶m
);
241 /* Connection accept timeout ~20 secs */
242 param
= cpu_to_le16(0x7d00);
243 hci_send_cmd(hdev
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
246 static void hci_scan_req(struct hci_dev
*hdev
, unsigned long opt
)
250 BT_DBG("%s %x", hdev
->name
, scan
);
252 /* Inquiry and Page scans */
253 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
256 static void hci_auth_req(struct hci_dev
*hdev
, unsigned long opt
)
260 BT_DBG("%s %x", hdev
->name
, auth
);
263 hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
266 static void hci_encrypt_req(struct hci_dev
*hdev
, unsigned long opt
)
270 BT_DBG("%s %x", hdev
->name
, encrypt
);
273 hci_send_cmd(hdev
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
276 static void hci_linkpol_req(struct hci_dev
*hdev
, unsigned long opt
)
278 __le16 policy
= cpu_to_le16(opt
);
280 BT_DBG("%s %x", hdev
->name
, policy
);
282 /* Default link policy */
283 hci_send_cmd(hdev
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
286 /* Get HCI device by index.
287 * Device is held on return. */
288 struct hci_dev
*hci_dev_get(int index
)
290 struct hci_dev
*hdev
= NULL
;
298 read_lock(&hci_dev_list_lock
);
299 list_for_each(p
, &hci_dev_list
) {
300 struct hci_dev
*d
= list_entry(p
, struct hci_dev
, list
);
301 if (d
->id
== index
) {
302 hdev
= hci_dev_hold(d
);
306 read_unlock(&hci_dev_list_lock
);
310 /* ---- Inquiry support ---- */
311 static void inquiry_cache_flush(struct hci_dev
*hdev
)
313 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
314 struct inquiry_entry
*next
= cache
->list
, *e
;
316 BT_DBG("cache %p", cache
);
325 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
327 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
328 struct inquiry_entry
*e
;
330 BT_DBG("cache %p, %s", cache
, batostr(bdaddr
));
332 for (e
= cache
->list
; e
; e
= e
->next
)
333 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
338 void hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
)
340 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
341 struct inquiry_entry
*e
;
343 BT_DBG("cache %p, %s", cache
, batostr(&data
->bdaddr
));
345 if (!(e
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
))) {
346 /* Entry not in the cache. Add new one. */
347 if (!(e
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
)))
349 e
->next
= cache
->list
;
353 memcpy(&e
->data
, data
, sizeof(*data
));
354 e
->timestamp
= jiffies
;
355 cache
->timestamp
= jiffies
;
358 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
360 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
361 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
362 struct inquiry_entry
*e
;
365 for (e
= cache
->list
; e
&& copied
< num
; e
= e
->next
, copied
++) {
366 struct inquiry_data
*data
= &e
->data
;
367 bacpy(&info
->bdaddr
, &data
->bdaddr
);
368 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
369 info
->pscan_period_mode
= data
->pscan_period_mode
;
370 info
->pscan_mode
= data
->pscan_mode
;
371 memcpy(info
->dev_class
, data
->dev_class
, 3);
372 info
->clock_offset
= data
->clock_offset
;
376 BT_DBG("cache %p, copied %d", cache
, copied
);
380 static void hci_inq_req(struct hci_dev
*hdev
, unsigned long opt
)
382 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
383 struct hci_cp_inquiry cp
;
385 BT_DBG("%s", hdev
->name
);
387 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
391 memcpy(&cp
.lap
, &ir
->lap
, 3);
392 cp
.length
= ir
->length
;
393 cp
.num_rsp
= ir
->num_rsp
;
394 hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
397 int hci_inquiry(void __user
*arg
)
399 __u8 __user
*ptr
= arg
;
400 struct hci_inquiry_req ir
;
401 struct hci_dev
*hdev
;
402 int err
= 0, do_inquiry
= 0, max_rsp
;
406 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
409 if (!(hdev
= hci_dev_get(ir
.dev_id
)))
412 hci_dev_lock_bh(hdev
);
413 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
414 inquiry_cache_empty(hdev
) ||
415 ir
.flags
& IREQ_CACHE_FLUSH
) {
416 inquiry_cache_flush(hdev
);
419 hci_dev_unlock_bh(hdev
);
421 timeo
= ir
.length
* msecs_to_jiffies(2000);
422 if (do_inquiry
&& (err
= hci_request(hdev
, hci_inq_req
, (unsigned long)&ir
, timeo
)) < 0)
425 /* for unlimited number of responses we will use buffer with 255 entries */
426 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
428 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
429 * copy it to the user space.
431 if (!(buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
))) {
436 hci_dev_lock_bh(hdev
);
437 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
438 hci_dev_unlock_bh(hdev
);
440 BT_DBG("num_rsp %d", ir
.num_rsp
);
442 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
444 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
457 /* ---- HCI ioctl helpers ---- */
459 int hci_dev_open(__u16 dev
)
461 struct hci_dev
*hdev
;
464 if (!(hdev
= hci_dev_get(dev
)))
467 BT_DBG("%s %p", hdev
->name
, hdev
);
471 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
)) {
476 if (test_bit(HCI_UP
, &hdev
->flags
)) {
481 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
482 set_bit(HCI_RAW
, &hdev
->flags
);
484 /* Treat all non BR/EDR controllers as raw devices for now */
485 if (hdev
->dev_type
!= HCI_BREDR
)
486 set_bit(HCI_RAW
, &hdev
->flags
);
488 if (hdev
->open(hdev
)) {
493 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
494 atomic_set(&hdev
->cmd_cnt
, 1);
495 set_bit(HCI_INIT
, &hdev
->flags
);
497 //__hci_request(hdev, hci_reset_req, 0, HZ);
498 ret
= __hci_request(hdev
, hci_init_req
, 0,
499 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
501 clear_bit(HCI_INIT
, &hdev
->flags
);
506 set_bit(HCI_UP
, &hdev
->flags
);
507 hci_notify(hdev
, HCI_DEV_UP
);
509 /* Init failed, cleanup */
510 tasklet_kill(&hdev
->rx_task
);
511 tasklet_kill(&hdev
->tx_task
);
512 tasklet_kill(&hdev
->cmd_task
);
514 skb_queue_purge(&hdev
->cmd_q
);
515 skb_queue_purge(&hdev
->rx_q
);
520 if (hdev
->sent_cmd
) {
521 kfree_skb(hdev
->sent_cmd
);
522 hdev
->sent_cmd
= NULL
;
530 hci_req_unlock(hdev
);
535 static int hci_dev_do_close(struct hci_dev
*hdev
)
537 BT_DBG("%s %p", hdev
->name
, hdev
);
539 hci_req_cancel(hdev
, ENODEV
);
542 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
543 hci_req_unlock(hdev
);
547 /* Kill RX and TX tasks */
548 tasklet_kill(&hdev
->rx_task
);
549 tasklet_kill(&hdev
->tx_task
);
551 hci_dev_lock_bh(hdev
);
552 inquiry_cache_flush(hdev
);
553 hci_conn_hash_flush(hdev
);
554 hci_blacklist_clear(hdev
);
555 hci_dev_unlock_bh(hdev
);
557 hci_notify(hdev
, HCI_DEV_DOWN
);
563 skb_queue_purge(&hdev
->cmd_q
);
564 atomic_set(&hdev
->cmd_cnt
, 1);
565 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
566 set_bit(HCI_INIT
, &hdev
->flags
);
567 __hci_request(hdev
, hci_reset_req
, 0,
568 msecs_to_jiffies(250));
569 clear_bit(HCI_INIT
, &hdev
->flags
);
573 tasklet_kill(&hdev
->cmd_task
);
576 skb_queue_purge(&hdev
->rx_q
);
577 skb_queue_purge(&hdev
->cmd_q
);
578 skb_queue_purge(&hdev
->raw_q
);
580 /* Drop last sent command */
581 if (hdev
->sent_cmd
) {
582 kfree_skb(hdev
->sent_cmd
);
583 hdev
->sent_cmd
= NULL
;
586 /* After this point our queues are empty
587 * and no tasks are scheduled. */
593 hci_req_unlock(hdev
);
599 int hci_dev_close(__u16 dev
)
601 struct hci_dev
*hdev
;
604 if (!(hdev
= hci_dev_get(dev
)))
606 err
= hci_dev_do_close(hdev
);
611 int hci_dev_reset(__u16 dev
)
613 struct hci_dev
*hdev
;
616 if (!(hdev
= hci_dev_get(dev
)))
620 tasklet_disable(&hdev
->tx_task
);
622 if (!test_bit(HCI_UP
, &hdev
->flags
))
626 skb_queue_purge(&hdev
->rx_q
);
627 skb_queue_purge(&hdev
->cmd_q
);
629 hci_dev_lock_bh(hdev
);
630 inquiry_cache_flush(hdev
);
631 hci_conn_hash_flush(hdev
);
632 hci_dev_unlock_bh(hdev
);
637 atomic_set(&hdev
->cmd_cnt
, 1);
638 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0;
640 if (!test_bit(HCI_RAW
, &hdev
->flags
))
641 ret
= __hci_request(hdev
, hci_reset_req
, 0,
642 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
645 tasklet_enable(&hdev
->tx_task
);
646 hci_req_unlock(hdev
);
651 int hci_dev_reset_stat(__u16 dev
)
653 struct hci_dev
*hdev
;
656 if (!(hdev
= hci_dev_get(dev
)))
659 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
666 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
668 struct hci_dev
*hdev
;
669 struct hci_dev_req dr
;
672 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
675 if (!(hdev
= hci_dev_get(dr
.dev_id
)))
680 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
681 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
685 if (!lmp_encrypt_capable(hdev
)) {
690 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
691 /* Auth must be enabled first */
692 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
693 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
698 err
= hci_request(hdev
, hci_encrypt_req
, dr
.dev_opt
,
699 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
703 err
= hci_request(hdev
, hci_scan_req
, dr
.dev_opt
,
704 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
708 err
= hci_request(hdev
, hci_linkpol_req
, dr
.dev_opt
,
709 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
713 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
714 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
718 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
722 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
723 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
727 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
728 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
740 int hci_get_dev_list(void __user
*arg
)
742 struct hci_dev_list_req
*dl
;
743 struct hci_dev_req
*dr
;
745 int n
= 0, size
, err
;
748 if (get_user(dev_num
, (__u16 __user
*) arg
))
751 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
754 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
756 if (!(dl
= kzalloc(size
, GFP_KERNEL
)))
761 read_lock_bh(&hci_dev_list_lock
);
762 list_for_each(p
, &hci_dev_list
) {
763 struct hci_dev
*hdev
;
764 hdev
= list_entry(p
, struct hci_dev
, list
);
765 (dr
+ n
)->dev_id
= hdev
->id
;
766 (dr
+ n
)->dev_opt
= hdev
->flags
;
770 read_unlock_bh(&hci_dev_list_lock
);
773 size
= sizeof(*dl
) + n
* sizeof(*dr
);
775 err
= copy_to_user(arg
, dl
, size
);
778 return err
? -EFAULT
: 0;
781 int hci_get_dev_info(void __user
*arg
)
783 struct hci_dev
*hdev
;
784 struct hci_dev_info di
;
787 if (copy_from_user(&di
, arg
, sizeof(di
)))
790 if (!(hdev
= hci_dev_get(di
.dev_id
)))
793 strcpy(di
.name
, hdev
->name
);
794 di
.bdaddr
= hdev
->bdaddr
;
795 di
.type
= (hdev
->bus
& 0x0f) | (hdev
->dev_type
<< 4);
796 di
.flags
= hdev
->flags
;
797 di
.pkt_type
= hdev
->pkt_type
;
798 di
.acl_mtu
= hdev
->acl_mtu
;
799 di
.acl_pkts
= hdev
->acl_pkts
;
800 di
.sco_mtu
= hdev
->sco_mtu
;
801 di
.sco_pkts
= hdev
->sco_pkts
;
802 di
.link_policy
= hdev
->link_policy
;
803 di
.link_mode
= hdev
->link_mode
;
805 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
806 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
808 if (copy_to_user(arg
, &di
, sizeof(di
)))
816 /* ---- Interface to HCI drivers ---- */
818 static int hci_rfkill_set_block(void *data
, bool blocked
)
820 struct hci_dev
*hdev
= data
;
822 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
827 hci_dev_do_close(hdev
);
832 static const struct rfkill_ops hci_rfkill_ops
= {
833 .set_block
= hci_rfkill_set_block
,
836 /* Alloc HCI device */
837 struct hci_dev
*hci_alloc_dev(void)
839 struct hci_dev
*hdev
;
841 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
845 skb_queue_head_init(&hdev
->driver_init
);
849 EXPORT_SYMBOL(hci_alloc_dev
);
851 /* Free HCI device */
852 void hci_free_dev(struct hci_dev
*hdev
)
854 skb_queue_purge(&hdev
->driver_init
);
856 /* will free via device release */
857 put_device(&hdev
->dev
);
859 EXPORT_SYMBOL(hci_free_dev
);
861 /* Register HCI device */
862 int hci_register_dev(struct hci_dev
*hdev
)
864 struct list_head
*head
= &hci_dev_list
, *p
;
867 BT_DBG("%p name %s bus %d owner %p", hdev
, hdev
->name
,
868 hdev
->bus
, hdev
->owner
);
870 if (!hdev
->open
|| !hdev
->close
|| !hdev
->destruct
)
873 write_lock_bh(&hci_dev_list_lock
);
875 /* Find first available device id */
876 list_for_each(p
, &hci_dev_list
) {
877 if (list_entry(p
, struct hci_dev
, list
)->id
!= id
)
882 sprintf(hdev
->name
, "hci%d", id
);
884 list_add(&hdev
->list
, head
);
886 atomic_set(&hdev
->refcnt
, 1);
887 spin_lock_init(&hdev
->lock
);
890 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
891 hdev
->esco_type
= (ESCO_HV1
);
892 hdev
->link_mode
= (HCI_LM_ACCEPT
);
894 hdev
->idle_timeout
= 0;
895 hdev
->sniff_max_interval
= 800;
896 hdev
->sniff_min_interval
= 80;
898 tasklet_init(&hdev
->cmd_task
, hci_cmd_task
,(unsigned long) hdev
);
899 tasklet_init(&hdev
->rx_task
, hci_rx_task
, (unsigned long) hdev
);
900 tasklet_init(&hdev
->tx_task
, hci_tx_task
, (unsigned long) hdev
);
902 skb_queue_head_init(&hdev
->rx_q
);
903 skb_queue_head_init(&hdev
->cmd_q
);
904 skb_queue_head_init(&hdev
->raw_q
);
906 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
907 hdev
->reassembly
[i
] = NULL
;
909 init_waitqueue_head(&hdev
->req_wait_q
);
910 mutex_init(&hdev
->req_lock
);
912 inquiry_cache_init(hdev
);
914 hci_conn_hash_init(hdev
);
916 INIT_LIST_HEAD(&hdev
->blacklist
);
918 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
920 atomic_set(&hdev
->promisc
, 0);
922 write_unlock_bh(&hci_dev_list_lock
);
924 hdev
->workqueue
= create_singlethread_workqueue(hdev
->name
);
925 if (!hdev
->workqueue
)
928 hci_register_sysfs(hdev
);
930 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
931 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
, hdev
);
933 if (rfkill_register(hdev
->rfkill
) < 0) {
934 rfkill_destroy(hdev
->rfkill
);
939 hci_notify(hdev
, HCI_DEV_REG
);
944 write_lock_bh(&hci_dev_list_lock
);
945 list_del(&hdev
->list
);
946 write_unlock_bh(&hci_dev_list_lock
);
950 EXPORT_SYMBOL(hci_register_dev
);
952 /* Unregister HCI device */
953 int hci_unregister_dev(struct hci_dev
*hdev
)
957 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
959 write_lock_bh(&hci_dev_list_lock
);
960 list_del(&hdev
->list
);
961 write_unlock_bh(&hci_dev_list_lock
);
963 hci_dev_do_close(hdev
);
965 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
966 kfree_skb(hdev
->reassembly
[i
]);
968 hci_notify(hdev
, HCI_DEV_UNREG
);
971 rfkill_unregister(hdev
->rfkill
);
972 rfkill_destroy(hdev
->rfkill
);
975 hci_unregister_sysfs(hdev
);
977 destroy_workqueue(hdev
->workqueue
);
983 EXPORT_SYMBOL(hci_unregister_dev
);
985 /* Suspend HCI device */
986 int hci_suspend_dev(struct hci_dev
*hdev
)
988 hci_notify(hdev
, HCI_DEV_SUSPEND
);
991 EXPORT_SYMBOL(hci_suspend_dev
);
993 /* Resume HCI device */
994 int hci_resume_dev(struct hci_dev
*hdev
)
996 hci_notify(hdev
, HCI_DEV_RESUME
);
999 EXPORT_SYMBOL(hci_resume_dev
);
1001 /* Receive frame from HCI drivers */
1002 int hci_recv_frame(struct sk_buff
*skb
)
1004 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1005 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
1006 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
1012 bt_cb(skb
)->incoming
= 1;
1015 __net_timestamp(skb
);
1017 /* Queue frame for rx task */
1018 skb_queue_tail(&hdev
->rx_q
, skb
);
1019 tasklet_schedule(&hdev
->rx_task
);
1023 EXPORT_SYMBOL(hci_recv_frame
);
1025 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
1026 int count
, __u8 index
, gfp_t gfp_mask
)
1031 struct sk_buff
*skb
;
1032 struct bt_skb_cb
*scb
;
1034 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
1035 index
>= NUM_REASSEMBLY
)
1038 skb
= hdev
->reassembly
[index
];
1042 case HCI_ACLDATA_PKT
:
1043 len
= HCI_MAX_FRAME_SIZE
;
1044 hlen
= HCI_ACL_HDR_SIZE
;
1047 len
= HCI_MAX_EVENT_SIZE
;
1048 hlen
= HCI_EVENT_HDR_SIZE
;
1050 case HCI_SCODATA_PKT
:
1051 len
= HCI_MAX_SCO_SIZE
;
1052 hlen
= HCI_SCO_HDR_SIZE
;
1056 skb
= bt_skb_alloc(len
, gfp_mask
);
1060 scb
= (void *) skb
->cb
;
1062 scb
->pkt_type
= type
;
1064 skb
->dev
= (void *) hdev
;
1065 hdev
->reassembly
[index
] = skb
;
1069 scb
= (void *) skb
->cb
;
1070 len
= min(scb
->expect
, (__u16
)count
);
1072 memcpy(skb_put(skb
, len
), data
, len
);
1081 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
1082 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
1083 scb
->expect
= h
->plen
;
1085 if (skb_tailroom(skb
) < scb
->expect
) {
1087 hdev
->reassembly
[index
] = NULL
;
1093 case HCI_ACLDATA_PKT
:
1094 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
1095 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
1096 scb
->expect
= __le16_to_cpu(h
->dlen
);
1098 if (skb_tailroom(skb
) < scb
->expect
) {
1100 hdev
->reassembly
[index
] = NULL
;
1106 case HCI_SCODATA_PKT
:
1107 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
1108 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
1109 scb
->expect
= h
->dlen
;
1111 if (skb_tailroom(skb
) < scb
->expect
) {
1113 hdev
->reassembly
[index
] = NULL
;
1120 if (scb
->expect
== 0) {
1121 /* Complete frame */
1123 bt_cb(skb
)->pkt_type
= type
;
1124 hci_recv_frame(skb
);
1126 hdev
->reassembly
[index
] = NULL
;
1134 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
1138 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
1142 rem
= hci_reassembly(hdev
, type
, data
, count
,
1143 type
- 1, GFP_ATOMIC
);
1147 data
+= (count
- rem
);
1153 EXPORT_SYMBOL(hci_recv_fragment
);
1155 #define STREAM_REASSEMBLY 0
1157 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
1163 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
1166 struct { char type
; } *pkt
;
1168 /* Start of the frame */
1175 type
= bt_cb(skb
)->pkt_type
;
1177 rem
= hci_reassembly(hdev
, type
, data
,
1178 count
, STREAM_REASSEMBLY
, GFP_ATOMIC
);
1182 data
+= (count
- rem
);
1188 EXPORT_SYMBOL(hci_recv_stream_fragment
);
1190 /* ---- Interface to upper protocols ---- */
1192 /* Register/Unregister protocols.
1193 * hci_task_lock is used to ensure that no tasks are running. */
1194 int hci_register_proto(struct hci_proto
*hp
)
1198 BT_DBG("%p name %s id %d", hp
, hp
->name
, hp
->id
);
1200 if (hp
->id
>= HCI_MAX_PROTO
)
1203 write_lock_bh(&hci_task_lock
);
1205 if (!hci_proto
[hp
->id
])
1206 hci_proto
[hp
->id
] = hp
;
1210 write_unlock_bh(&hci_task_lock
);
1214 EXPORT_SYMBOL(hci_register_proto
);
1216 int hci_unregister_proto(struct hci_proto
*hp
)
1220 BT_DBG("%p name %s id %d", hp
, hp
->name
, hp
->id
);
1222 if (hp
->id
>= HCI_MAX_PROTO
)
1225 write_lock_bh(&hci_task_lock
);
1227 if (hci_proto
[hp
->id
])
1228 hci_proto
[hp
->id
] = NULL
;
1232 write_unlock_bh(&hci_task_lock
);
1236 EXPORT_SYMBOL(hci_unregister_proto
);
1238 int hci_register_cb(struct hci_cb
*cb
)
1240 BT_DBG("%p name %s", cb
, cb
->name
);
1242 write_lock_bh(&hci_cb_list_lock
);
1243 list_add(&cb
->list
, &hci_cb_list
);
1244 write_unlock_bh(&hci_cb_list_lock
);
1248 EXPORT_SYMBOL(hci_register_cb
);
1250 int hci_unregister_cb(struct hci_cb
*cb
)
1252 BT_DBG("%p name %s", cb
, cb
->name
);
1254 write_lock_bh(&hci_cb_list_lock
);
1255 list_del(&cb
->list
);
1256 write_unlock_bh(&hci_cb_list_lock
);
1260 EXPORT_SYMBOL(hci_unregister_cb
);
1262 static int hci_send_frame(struct sk_buff
*skb
)
1264 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1271 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
1273 if (atomic_read(&hdev
->promisc
)) {
1275 __net_timestamp(skb
);
1277 hci_send_to_sock(hdev
, skb
);
1280 /* Get rid of skb owner, prior to sending to the driver. */
1283 return hdev
->send(skb
);
1286 /* Send HCI command */
1287 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
1289 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
1290 struct hci_command_hdr
*hdr
;
1291 struct sk_buff
*skb
;
1293 BT_DBG("%s opcode 0x%x plen %d", hdev
->name
, opcode
, plen
);
1295 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
1297 BT_ERR("%s no memory for command", hdev
->name
);
1301 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
1302 hdr
->opcode
= cpu_to_le16(opcode
);
1306 memcpy(skb_put(skb
, plen
), param
, plen
);
1308 BT_DBG("skb len %d", skb
->len
);
1310 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
1311 skb
->dev
= (void *) hdev
;
1313 skb_queue_tail(&hdev
->cmd_q
, skb
);
1314 tasklet_schedule(&hdev
->cmd_task
);
1319 /* Get data from the previously sent command */
1320 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
1322 struct hci_command_hdr
*hdr
;
1324 if (!hdev
->sent_cmd
)
1327 hdr
= (void *) hdev
->sent_cmd
->data
;
1329 if (hdr
->opcode
!= cpu_to_le16(opcode
))
1332 BT_DBG("%s opcode 0x%x", hdev
->name
, opcode
);
1334 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
1338 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
1340 struct hci_acl_hdr
*hdr
;
1343 skb_push(skb
, HCI_ACL_HDR_SIZE
);
1344 skb_reset_transport_header(skb
);
1345 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
1346 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
1347 hdr
->dlen
= cpu_to_le16(len
);
1350 void hci_send_acl(struct hci_conn
*conn
, struct sk_buff
*skb
, __u16 flags
)
1352 struct hci_dev
*hdev
= conn
->hdev
;
1353 struct sk_buff
*list
;
1355 BT_DBG("%s conn %p flags 0x%x", hdev
->name
, conn
, flags
);
1357 skb
->dev
= (void *) hdev
;
1358 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
1359 hci_add_acl_hdr(skb
, conn
->handle
, flags
| ACL_START
);
1361 if (!(list
= skb_shinfo(skb
)->frag_list
)) {
1362 /* Non fragmented */
1363 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
1365 skb_queue_tail(&conn
->data_q
, skb
);
1368 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
1370 skb_shinfo(skb
)->frag_list
= NULL
;
1372 /* Queue all fragments atomically */
1373 spin_lock_bh(&conn
->data_q
.lock
);
1375 __skb_queue_tail(&conn
->data_q
, skb
);
1377 skb
= list
; list
= list
->next
;
1379 skb
->dev
= (void *) hdev
;
1380 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
1381 hci_add_acl_hdr(skb
, conn
->handle
, flags
| ACL_CONT
);
1383 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
1385 __skb_queue_tail(&conn
->data_q
, skb
);
1388 spin_unlock_bh(&conn
->data_q
.lock
);
1391 tasklet_schedule(&hdev
->tx_task
);
1393 EXPORT_SYMBOL(hci_send_acl
);
1396 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
1398 struct hci_dev
*hdev
= conn
->hdev
;
1399 struct hci_sco_hdr hdr
;
1401 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
1403 hdr
.handle
= cpu_to_le16(conn
->handle
);
1404 hdr
.dlen
= skb
->len
;
1406 skb_push(skb
, HCI_SCO_HDR_SIZE
);
1407 skb_reset_transport_header(skb
);
1408 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
1410 skb
->dev
= (void *) hdev
;
1411 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
1413 skb_queue_tail(&conn
->data_q
, skb
);
1414 tasklet_schedule(&hdev
->tx_task
);
1416 EXPORT_SYMBOL(hci_send_sco
);
1418 /* ---- HCI TX task (outgoing data) ---- */
1420 /* HCI Connection scheduler */
1421 static inline struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
, int *quote
)
1423 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
1424 struct hci_conn
*conn
= NULL
;
1425 int num
= 0, min
= ~0;
1426 struct list_head
*p
;
1428 /* We don't have to lock device here. Connections are always
1429 * added and removed with TX task disabled. */
1430 list_for_each(p
, &h
->list
) {
1432 c
= list_entry(p
, struct hci_conn
, list
);
1434 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
1437 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
1442 if (c
->sent
< min
) {
1449 int cnt
= (type
== ACL_LINK
? hdev
->acl_cnt
: hdev
->sco_cnt
);
1455 BT_DBG("conn %p quote %d", conn
, *quote
);
1459 static inline void hci_acl_tx_to(struct hci_dev
*hdev
)
1461 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
1462 struct list_head
*p
;
1465 BT_ERR("%s ACL tx timeout", hdev
->name
);
1467 /* Kill stalled connections */
1468 list_for_each(p
, &h
->list
) {
1469 c
= list_entry(p
, struct hci_conn
, list
);
1470 if (c
->type
== ACL_LINK
&& c
->sent
) {
1471 BT_ERR("%s killing stalled ACL connection %s",
1472 hdev
->name
, batostr(&c
->dst
));
1473 hci_acl_disconn(c
, 0x13);
1478 static inline void hci_sched_acl(struct hci_dev
*hdev
)
1480 struct hci_conn
*conn
;
1481 struct sk_buff
*skb
;
1484 BT_DBG("%s", hdev
->name
);
1486 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
1487 /* ACL tx timeout must be longer than maximum
1488 * link supervision timeout (40.9 seconds) */
1489 if (!hdev
->acl_cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+ HZ
* 45))
1490 hci_acl_tx_to(hdev
);
1493 while (hdev
->acl_cnt
&& (conn
= hci_low_sent(hdev
, ACL_LINK
, "e
))) {
1494 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
1495 BT_DBG("skb %p len %d", skb
, skb
->len
);
1497 hci_conn_enter_active_mode(conn
);
1499 hci_send_frame(skb
);
1500 hdev
->acl_last_tx
= jiffies
;
1509 static inline void hci_sched_sco(struct hci_dev
*hdev
)
1511 struct hci_conn
*conn
;
1512 struct sk_buff
*skb
;
1515 BT_DBG("%s", hdev
->name
);
1517 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
1518 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
1519 BT_DBG("skb %p len %d", skb
, skb
->len
);
1520 hci_send_frame(skb
);
1523 if (conn
->sent
== ~0)
1529 static inline void hci_sched_esco(struct hci_dev
*hdev
)
1531 struct hci_conn
*conn
;
1532 struct sk_buff
*skb
;
1535 BT_DBG("%s", hdev
->name
);
1537 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
, "e
))) {
1538 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
1539 BT_DBG("skb %p len %d", skb
, skb
->len
);
1540 hci_send_frame(skb
);
1543 if (conn
->sent
== ~0)
1549 static void hci_tx_task(unsigned long arg
)
1551 struct hci_dev
*hdev
= (struct hci_dev
*) arg
;
1552 struct sk_buff
*skb
;
1554 read_lock(&hci_task_lock
);
1556 BT_DBG("%s acl %d sco %d", hdev
->name
, hdev
->acl_cnt
, hdev
->sco_cnt
);
1558 /* Schedule queues and send stuff to HCI driver */
1560 hci_sched_acl(hdev
);
1562 hci_sched_sco(hdev
);
1564 hci_sched_esco(hdev
);
1566 /* Send next queued raw (unknown type) packet */
1567 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
1568 hci_send_frame(skb
);
1570 read_unlock(&hci_task_lock
);
1573 /* ----- HCI RX task (incoming data proccessing) ----- */
1575 /* ACL data packet */
1576 static inline void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
1578 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
1579 struct hci_conn
*conn
;
1580 __u16 handle
, flags
;
1582 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
1584 handle
= __le16_to_cpu(hdr
->handle
);
1585 flags
= hci_flags(handle
);
1586 handle
= hci_handle(handle
);
1588 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev
->name
, skb
->len
, handle
, flags
);
1590 hdev
->stat
.acl_rx
++;
1593 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
1594 hci_dev_unlock(hdev
);
1597 register struct hci_proto
*hp
;
1599 hci_conn_enter_active_mode(conn
);
1601 /* Send to upper protocol */
1602 if ((hp
= hci_proto
[HCI_PROTO_L2CAP
]) && hp
->recv_acldata
) {
1603 hp
->recv_acldata(conn
, skb
, flags
);
1607 BT_ERR("%s ACL packet for unknown connection handle %d",
1608 hdev
->name
, handle
);
1614 /* SCO data packet */
1615 static inline void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
1617 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
1618 struct hci_conn
*conn
;
1621 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
1623 handle
= __le16_to_cpu(hdr
->handle
);
1625 BT_DBG("%s len %d handle 0x%x", hdev
->name
, skb
->len
, handle
);
1627 hdev
->stat
.sco_rx
++;
1630 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
1631 hci_dev_unlock(hdev
);
1634 register struct hci_proto
*hp
;
1636 /* Send to upper protocol */
1637 if ((hp
= hci_proto
[HCI_PROTO_SCO
]) && hp
->recv_scodata
) {
1638 hp
->recv_scodata(conn
, skb
);
1642 BT_ERR("%s SCO packet for unknown connection handle %d",
1643 hdev
->name
, handle
);
1649 static void hci_rx_task(unsigned long arg
)
1651 struct hci_dev
*hdev
= (struct hci_dev
*) arg
;
1652 struct sk_buff
*skb
;
1654 BT_DBG("%s", hdev
->name
);
1656 read_lock(&hci_task_lock
);
1658 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
1659 if (atomic_read(&hdev
->promisc
)) {
1660 /* Send copy to the sockets */
1661 hci_send_to_sock(hdev
, skb
);
1664 if (test_bit(HCI_RAW
, &hdev
->flags
)) {
1669 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
1670 /* Don't process data packets in this states. */
1671 switch (bt_cb(skb
)->pkt_type
) {
1672 case HCI_ACLDATA_PKT
:
1673 case HCI_SCODATA_PKT
:
1680 switch (bt_cb(skb
)->pkt_type
) {
1682 hci_event_packet(hdev
, skb
);
1685 case HCI_ACLDATA_PKT
:
1686 BT_DBG("%s ACL data packet", hdev
->name
);
1687 hci_acldata_packet(hdev
, skb
);
1690 case HCI_SCODATA_PKT
:
1691 BT_DBG("%s SCO data packet", hdev
->name
);
1692 hci_scodata_packet(hdev
, skb
);
1701 read_unlock(&hci_task_lock
);
1704 static void hci_cmd_task(unsigned long arg
)
1706 struct hci_dev
*hdev
= (struct hci_dev
*) arg
;
1707 struct sk_buff
*skb
;
1709 BT_DBG("%s cmd %d", hdev
->name
, atomic_read(&hdev
->cmd_cnt
));
1711 if (!atomic_read(&hdev
->cmd_cnt
) && time_after(jiffies
, hdev
->cmd_last_tx
+ HZ
)) {
1712 BT_ERR("%s command tx timeout", hdev
->name
);
1713 atomic_set(&hdev
->cmd_cnt
, 1);
1716 /* Send queued commands */
1717 if (atomic_read(&hdev
->cmd_cnt
) && (skb
= skb_dequeue(&hdev
->cmd_q
))) {
1718 kfree_skb(hdev
->sent_cmd
);
1720 if ((hdev
->sent_cmd
= skb_clone(skb
, GFP_ATOMIC
))) {
1721 atomic_dec(&hdev
->cmd_cnt
);
1722 hci_send_frame(skb
);
1723 hdev
->cmd_last_tx
= jiffies
;
1725 skb_queue_head(&hdev
->cmd_q
, skb
);
1726 tasklet_schedule(&hdev
->cmd_task
);