/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

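/* Add an inquiry result to the cache, or refresh the data and timestamp
 * of the existing entry for the same remote address. */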
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next     = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

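/* Copy at most num cached results into buf as struct inquiry_info records
 * and return how many entries were actually copied. */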
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

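/* Run an inquiry on behalf of user space: flush the cache when it is stale,
 * empty or a flush was requested, perform the inquiry if needed, and copy
 * the cached results back to the caller. */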
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

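/* Bring a device up: power on the transport via hdev->open() and, unless
 * the device is treated as raw, run the HCI init sequence before setting
 * HCI_UP and notifying listeners. */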
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

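/* Shut a device down: cancel pending requests, kill the tasklets, flush
 * queues and connections, and hand the transport back to the driver. */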
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		hci_dev_put(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

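/* Collect driver-provided fragments of one HCI packet into a single skb;
 * the expected length is taken from the packet header once enough of it
 * has arrived.  Returns the number of input bytes left unconsumed, or a
 * negative error. */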
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
						type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

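/* Reassemble HCI packets from a raw byte stream (e.g. a UART transport):
 * the first byte of each packet carries the packet type, the rest is fed
 * through hci_reassembly() using the dedicated stream slot. */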
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
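/* Pick the connection of the given type with the fewest outstanding packets
 * and derive its send quota from the controller's free buffer count. */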
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

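/* RX tasklet: drain hdev->rx_q, mirror packets to raw sockets when in
 * promiscuous mode, and dispatch events, ACL and SCO data to their
 * handlers. */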
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

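/* CMD tasklet: send queued commands to the driver one at a time, gated by
 * cmd_cnt, which is replenished when the controller acknowledges the
 * previous command. */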
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}