/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
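
/*
 * A minimal sketch of a notifier consumer (illustrative only; the
 * callback and its name are hypothetical, not part of this file):
 *
 *	static int my_hci_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_UP)
 *			printk(KERN_INFO "%s is up\n", hdev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_hci_event };
 *	...
 *	hci_register_notifier(&my_nb);
 */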
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
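
/*
 * Note on the wait pattern above: the task is put on req_wait_q and
 * marked TASK_INTERRUPTIBLE *before* the request callback runs, so a
 * completion that fires between req() and schedule_timeout() is not
 * lost -- the wakeup sets the task runnable again and
 * schedule_timeout() returns promptly. HCI_REQ_DONE and
 * HCI_REQ_CANCELED are set by hci_req_complete()/hci_req_cancel()
 * from event-handling and teardown paths.
 */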
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
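
/*
 * Timeout arithmetic for the two values above: HCI timing parameters
 * are expressed in baseband slots of 0.625 ms, so 0x8000 = 32768 *
 * 0.625 ms ~= 20.5 s for the page timeout and 0x7d00 = 32000 *
 * 0.625 ms = 20 s for the connection accept timeout.
 */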
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
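
/*
 * "Held on return" means the reference count was bumped via
 * hci_dev_hold(), so every successful hci_dev_get() must be paired
 * with a hci_dev_put(), as all the ioctl helpers below do:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */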
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
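
/*
 * Timing note for the wait above: ir.length is in the HCI Inquiry
 * command's units of 1.28 s, while the request is allowed 2000 ms per
 * unit, leaving headroom for the Inquiry Complete event to arrive and
 * finish the request before the timeout fires.
 */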
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
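
/*
 * HCISETACLMTU/HCISETSCOMTU pack two 16-bit values into the 32-bit
 * dr.dev_opt: as read above on a little-endian host, the packet count
 * sits in the low half (__u16 index 0) and the MTU in the high half
 * (index 1). Note that this pointer-cast decoding is endian-dependent
 * by construction.
 */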
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
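
/*
 * A minimal sketch of how a transport driver uses this interface
 * (illustrative only; my_open/my_close/my_send/my_destruct are
 * hypothetical driver callbacks, not part of this file):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->type     = HCI_USB;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 *
 * Teardown is the mirror image: hci_unregister_dev(hdev) followed by
 * hci_free_dev(hdev).
 */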
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
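
/*
 * ID allocation above relies on hci_dev_list being kept sorted by id:
 * the walk advances while ids are consecutive from 0 and stops at the
 * first gap, so the new device takes the lowest free id and is linked
 * in right after its predecessor, preserving the ordering.
 */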
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	hci_unregister_sysfs(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive packet type fragment */
#define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
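
/*
 * The "(type) - 2" indexing works because the UART/H4 packet type
 * indicators are HCI_COMMAND_PKT = 1, HCI_ACLDATA_PKT = 2,
 * HCI_SCODATA_PKT = 3 and HCI_EVENT_PKT = 4, so the three inbound
 * types map onto reassembly[0..2]. A line-discipline or USB driver
 * simply feeds whatever bytes arrived, e.g.
 * hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len), and complete
 * frames are pushed up via hci_recv_frame() as they fill in.
 */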
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}
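
/*
 * hci_send_cmd() only queues: the skb goes onto cmd_q and the cmd
 * tasklet is scheduled. Actual transmission happens in hci_cmd_task()
 * at the bottom of this file, one command at a time, gated by the
 * cmd_cnt credit returned by the controller.
 */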
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
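
/*
 * Fragmentation note: when the upper layer hands down a PDU larger
 * than the ACL MTU it arrives here as a head skb with the remaining
 * fragments chained on frag_list. The head is tagged ACL_START and
 * each chained fragment ACL_CONT, and the whole chain is queued under
 * the data_q lock so the TX scheduler can never interleave another
 * writer's fragments into the middle of this PDU.
 */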
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
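
/*
 * Scheduling policy: among connections of the requested type that have
 * queued data, pick the one with the fewest packets in flight
 * (c->sent), then grant it an even share of the remaining controller
 * buffer credits (cnt / num, at least 1). Repeated calls therefore
 * approximate a round-robin weighted toward starved connections.
 */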
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}
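
/*
 * Command flow control: cmd_cnt starts at 1 (exactly one outstanding
 * command) and is decremented here on transmit; the event handlers in
 * hci_event.c restore the credit based on the ncmd field of Command
 * Complete / Command Status events. The one-second timeout above is a
 * watchdog that re-arms the credit if the controller stops responding.
 * The clone kept in hdev->sent_cmd is what hci_sent_cmd_data()
 * inspects when the reply arrives.
 */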