net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <net/sock.h>
46 #include <asm/system.h>
47 #include <asm/uaccess.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
53 static void hci_cmd_task(unsigned long arg);
54 static void hci_rx_task(unsigned long arg);
55 static void hci_tx_task(unsigned long arg);
56 static void hci_notify(struct hci_dev *hdev, int event);
58 static DEFINE_RWLOCK(hci_task_lock);
60 /* HCI device list */
61 LIST_HEAD(hci_dev_list);
62 DEFINE_RWLOCK(hci_dev_list_lock);
64 /* HCI callback list */
65 LIST_HEAD(hci_cb_list);
66 DEFINE_RWLOCK(hci_cb_list_lock);
68 /* HCI protocols */
69 #define HCI_MAX_PROTO 2
70 struct hci_proto *hci_proto[HCI_MAX_PROTO];
72 /* HCI notifiers list */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
75 /* ---- HCI notifications ---- */
77 int hci_register_notifier(struct notifier_block *nb)
79 return atomic_notifier_chain_register(&hci_notifier, nb);
82 int hci_unregister_notifier(struct notifier_block *nb)
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 static void hci_notify(struct hci_dev *hdev, int event)
89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
92 /* ---- HCI requests ---- */
94 void hci_req_complete(struct hci_dev *hdev, int result)
96 BT_DBG("%s result 0x%2.2x", hdev->name, result);
98 if (hdev->req_status == HCI_REQ_PEND) {
99 hdev->req_result = result;
100 hdev->req_status = HCI_REQ_DONE;
101 wake_up_interruptible(&hdev->req_wait_q);
105 static void hci_req_cancel(struct hci_dev *hdev, int err)
107 BT_DBG("%s err 0x%2.2x", hdev->name, err);
109 if (hdev->req_status == HCI_REQ_PEND) {
110 hdev->req_result = err;
111 hdev->req_status = HCI_REQ_CANCELED;
112 wake_up_interruptible(&hdev->req_wait_q);
116 /* Execute request and wait for completion. */
117 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
118 unsigned long opt, __u32 timeout)
120 DECLARE_WAITQUEUE(wait, current);
121 int err = 0;
123 BT_DBG("%s start", hdev->name);
125 hdev->req_status = HCI_REQ_PEND;
127 add_wait_queue(&hdev->req_wait_q, &wait);
128 set_current_state(TASK_INTERRUPTIBLE);
130 req(hdev, opt);
131 schedule_timeout(timeout);
133 remove_wait_queue(&hdev->req_wait_q, &wait);
135 if (signal_pending(current))
136 return -EINTR;
138 switch (hdev->req_status) {
139 case HCI_REQ_DONE:
140 err = -bt_err(hdev->req_result);
141 break;
143 case HCI_REQ_CANCELED:
144 err = -hdev->req_result;
145 break;
147 default:
148 err = -ETIMEDOUT;
149 break;
152 hdev->req_status = hdev->req_result = 0;
154 BT_DBG("%s end: err %d", hdev->name, err);
156 return err;
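/*
 * The request machinery above is a small synchronous bridge: the req()
 * callback queues one or more HCI commands, the caller sleeps on
 * req_wait_q, and hci_req_complete() (called when the matching command
 * finishes) records the status and wakes it up.  A minimal usage sketch,
 * mirroring what hci_dev_reset() below does (illustrative only, kept out
 * of the build like the #if 0 block further down):
 */
#if 0
int err;

hci_req_lock(hdev);
err = __hci_request(hdev, hci_reset_req, 0,
		    msecs_to_jiffies(HCI_INIT_TIMEOUT));
hci_req_unlock(hdev);
#endif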
159 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
160 unsigned long opt, __u32 timeout)
162 int ret;
164 if (!test_bit(HCI_UP, &hdev->flags))
165 return -ENETDOWN;
167 /* Serialize all requests */
168 hci_req_lock(hdev);
169 ret = __hci_request(hdev, req, opt, timeout);
170 hci_req_unlock(hdev);
172 return ret;
175 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
177 BT_DBG("%s %ld", hdev->name, opt);
179 /* Reset device */
180 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
183 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
185 struct sk_buff *skb;
186 __le16 param;
187 __u8 flt_type;
189 BT_DBG("%s %ld", hdev->name, opt);
191 /* Driver initialization */
193 /* Special commands */
194 while ((skb = skb_dequeue(&hdev->driver_init))) {
195 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
196 skb->dev = (void *) hdev;
198 skb_queue_tail(&hdev->cmd_q, skb);
199 tasklet_schedule(&hdev->cmd_task);
201 skb_queue_purge(&hdev->driver_init);
203 /* Mandatory initialization */
205 /* Reset */
206 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
209 /* Read Local Supported Features */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
212 /* Read Local Version */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
216 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
218 #if 0
219 /* Host buffer size */
221 struct hci_cp_host_buffer_size cp;
222 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
223 cp.sco_mtu = HCI_MAX_SCO_SIZE;
224 cp.acl_max_pkt = cpu_to_le16(0xffff);
225 cp.sco_max_pkt = cpu_to_le16(0xffff);
226 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
228 #endif
230 /* Read BD Address */
231 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
233 /* Read Class of Device */
234 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
236 /* Read Local Name */
237 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
239 /* Read Voice Setting */
240 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
242 /* Optional initialization */
244 /* Clear Event Filters */
245 flt_type = HCI_FLT_CLEAR_ALL;
246 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
248 /* Page timeout ~20 secs */
249 param = cpu_to_le16(0x8000);
250 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
252 /* Connection accept timeout ~20 secs */
253 param = cpu_to_le16(0x7d00);
254 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
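/* The two timeouts above are programmed in baseband slots of 0.625 ms:
 * 0x8000 * 0.625 ms ~= 20.5 s and 0x7d00 * 0.625 ms = 20.0 s, which is
 * where the "~20 secs" figures in the comments come from. */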
257 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
259 __u8 scan = opt;
261 BT_DBG("%s %x", hdev->name, scan);
263 /* Inquiry and Page scans */
264 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
267 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
269 __u8 auth = opt;
271 BT_DBG("%s %x", hdev->name, auth);
273 /* Authentication */
274 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
277 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
279 __u8 encrypt = opt;
281 BT_DBG("%s %x", hdev->name, encrypt);
283 /* Encryption */
284 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
287 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
289 __le16 policy = cpu_to_le16(opt);
291 BT_DBG("%s %x", hdev->name, policy);
293 /* Default link policy */
294 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
297 /* Get HCI device by index.
298 * Device is held on return. */
299 struct hci_dev *hci_dev_get(int index)
301 struct hci_dev *hdev = NULL;
302 struct list_head *p;
304 BT_DBG("%d", index);
306 if (index < 0)
307 return NULL;
309 read_lock(&hci_dev_list_lock);
310 list_for_each(p, &hci_dev_list) {
311 struct hci_dev *d = list_entry(p, struct hci_dev, list);
312 if (d->id == index) {
313 hdev = hci_dev_hold(d);
314 break;
317 read_unlock(&hci_dev_list_lock);
318 return hdev;
321 /* ---- Inquiry support ---- */
322 static void inquiry_cache_flush(struct hci_dev *hdev)
324 struct inquiry_cache *cache = &hdev->inq_cache;
325 struct inquiry_entry *next = cache->list, *e;
327 BT_DBG("cache %p", cache);
329 cache->list = NULL;
330 while ((e = next)) {
331 next = e->next;
332 kfree(e);
336 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
338 struct inquiry_cache *cache = &hdev->inq_cache;
339 struct inquiry_entry *e;
341 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
343 for (e = cache->list; e; e = e->next)
344 if (!bacmp(&e->data.bdaddr, bdaddr))
345 break;
346 return e;
349 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
351 struct inquiry_cache *cache = &hdev->inq_cache;
352 struct inquiry_entry *e;
354 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
356 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
357 /* Entry not in the cache. Add new one. */
358 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
359 return;
360 e->next = cache->list;
361 cache->list = e;
364 memcpy(&e->data, data, sizeof(*data));
365 e->timestamp = jiffies;
366 cache->timestamp = jiffies;
369 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
371 struct inquiry_cache *cache = &hdev->inq_cache;
372 struct inquiry_info *info = (struct inquiry_info *) buf;
373 struct inquiry_entry *e;
374 int copied = 0;
376 for (e = cache->list; e && copied < num; e = e->next, copied++) {
377 struct inquiry_data *data = &e->data;
378 bacpy(&info->bdaddr, &data->bdaddr);
379 info->pscan_rep_mode = data->pscan_rep_mode;
380 info->pscan_period_mode = data->pscan_period_mode;
381 info->pscan_mode = data->pscan_mode;
382 memcpy(info->dev_class, data->dev_class, 3);
383 info->clock_offset = data->clock_offset;
384 info++;
387 BT_DBG("cache %p, copied %d", cache, copied);
388 return copied;
391 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
393 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
394 struct hci_cp_inquiry cp;
396 BT_DBG("%s", hdev->name);
398 if (test_bit(HCI_INQUIRY, &hdev->flags))
399 return;
401 /* Start Inquiry */
402 memcpy(&cp.lap, &ir->lap, 3);
403 cp.length = ir->length;
404 cp.num_rsp = ir->num_rsp;
405 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
408 int hci_inquiry(void __user *arg)
410 __u8 __user *ptr = arg;
411 struct hci_inquiry_req ir;
412 struct hci_dev *hdev;
413 int err = 0, do_inquiry = 0, max_rsp;
414 long timeo;
415 __u8 *buf;
417 if (copy_from_user(&ir, ptr, sizeof(ir)))
418 return -EFAULT;
420 if (!(hdev = hci_dev_get(ir.dev_id)))
421 return -ENODEV;
423 hci_dev_lock_bh(hdev);
424 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
425 inquiry_cache_empty(hdev) ||
426 ir.flags & IREQ_CACHE_FLUSH) {
427 inquiry_cache_flush(hdev);
428 do_inquiry = 1;
430 hci_dev_unlock_bh(hdev);
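/* ir.length below is in the HCI inquiry-length unit of 1.28 s, so
 * budgeting 2000 ms of wait per unit leaves some headroom: e.g.
 * ir.length == 8 means an inquiry of up to ~10.24 s and a wait of
 * at most 16 s. */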
432 timeo = ir.length * msecs_to_jiffies(2000);
433 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
434 goto done;
436 /* for an unlimited number of responses we will use a buffer with 255 entries */
437 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
439 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
440 * copy it to user space. */
442 if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
443 err = -ENOMEM;
444 goto done;
447 hci_dev_lock_bh(hdev);
448 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
449 hci_dev_unlock_bh(hdev);
451 BT_DBG("num_rsp %d", ir.num_rsp);
453 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
454 ptr += sizeof(ir);
455 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
456 ir.num_rsp))
457 err = -EFAULT;
458 } else
459 err = -EFAULT;
461 kfree(buf);
463 done:
464 hci_dev_put(hdev);
465 return err;
468 /* ---- HCI ioctl helpers ---- */
470 int hci_dev_open(__u16 dev)
472 struct hci_dev *hdev;
473 int ret = 0;
475 if (!(hdev = hci_dev_get(dev)))
476 return -ENODEV;
478 BT_DBG("%s %p", hdev->name, hdev);
480 hci_req_lock(hdev);
482 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
483 ret = -ERFKILL;
484 goto done;
487 if (test_bit(HCI_UP, &hdev->flags)) {
488 ret = -EALREADY;
489 goto done;
492 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
493 set_bit(HCI_RAW, &hdev->flags);
495 /* Treat all non-BR/EDR controllers as raw devices for now */
496 if (hdev->dev_type != HCI_BREDR)
497 set_bit(HCI_RAW, &hdev->flags);
499 if (hdev->open(hdev)) {
500 ret = -EIO;
501 goto done;
504 if (!test_bit(HCI_RAW, &hdev->flags)) {
505 atomic_set(&hdev->cmd_cnt, 1);
506 set_bit(HCI_INIT, &hdev->flags);
508 //__hci_request(hdev, hci_reset_req, 0, HZ);
509 ret = __hci_request(hdev, hci_init_req, 0,
510 msecs_to_jiffies(HCI_INIT_TIMEOUT));
512 clear_bit(HCI_INIT, &hdev->flags);
515 if (!ret) {
516 hci_dev_hold(hdev);
517 set_bit(HCI_UP, &hdev->flags);
518 hci_notify(hdev, HCI_DEV_UP);
519 } else {
520 /* Init failed, cleanup */
521 tasklet_kill(&hdev->rx_task);
522 tasklet_kill(&hdev->tx_task);
523 tasklet_kill(&hdev->cmd_task);
525 skb_queue_purge(&hdev->cmd_q);
526 skb_queue_purge(&hdev->rx_q);
528 if (hdev->flush)
529 hdev->flush(hdev);
531 if (hdev->sent_cmd) {
532 kfree_skb(hdev->sent_cmd);
533 hdev->sent_cmd = NULL;
536 hdev->close(hdev);
537 hdev->flags = 0;
540 done:
541 hci_req_unlock(hdev);
542 hci_dev_put(hdev);
543 return ret;
546 static int hci_dev_do_close(struct hci_dev *hdev)
548 BT_DBG("%s %p", hdev->name, hdev);
550 hci_req_cancel(hdev, ENODEV);
551 hci_req_lock(hdev);
553 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
554 hci_req_unlock(hdev);
555 return 0;
558 /* Kill RX and TX tasks */
559 tasklet_kill(&hdev->rx_task);
560 tasklet_kill(&hdev->tx_task);
562 hci_dev_lock_bh(hdev);
563 inquiry_cache_flush(hdev);
564 hci_conn_hash_flush(hdev);
565 hci_blacklist_clear(hdev);
566 hci_dev_unlock_bh(hdev);
568 hci_notify(hdev, HCI_DEV_DOWN);
570 if (hdev->flush)
571 hdev->flush(hdev);
573 /* Reset device */
574 skb_queue_purge(&hdev->cmd_q);
575 atomic_set(&hdev->cmd_cnt, 1);
576 if (!test_bit(HCI_RAW, &hdev->flags)) {
577 set_bit(HCI_INIT, &hdev->flags);
578 __hci_request(hdev, hci_reset_req, 0,
579 msecs_to_jiffies(250));
580 clear_bit(HCI_INIT, &hdev->flags);
583 /* Kill cmd task */
584 tasklet_kill(&hdev->cmd_task);
586 /* Drop queues */
587 skb_queue_purge(&hdev->rx_q);
588 skb_queue_purge(&hdev->cmd_q);
589 skb_queue_purge(&hdev->raw_q);
591 /* Drop last sent command */
592 if (hdev->sent_cmd) {
593 kfree_skb(hdev->sent_cmd);
594 hdev->sent_cmd = NULL;
597 /* After this point our queues are empty
598 * and no tasks are scheduled. */
599 hdev->close(hdev);
601 /* Clear flags */
602 hdev->flags = 0;
604 hci_req_unlock(hdev);
606 hci_dev_put(hdev);
607 return 0;
610 int hci_dev_close(__u16 dev)
612 struct hci_dev *hdev;
613 int err;
615 if (!(hdev = hci_dev_get(dev)))
616 return -ENODEV;
617 err = hci_dev_do_close(hdev);
618 hci_dev_put(hdev);
619 return err;
622 int hci_dev_reset(__u16 dev)
624 struct hci_dev *hdev;
625 int ret = 0;
627 if (!(hdev = hci_dev_get(dev)))
628 return -ENODEV;
630 hci_req_lock(hdev);
631 tasklet_disable(&hdev->tx_task);
633 if (!test_bit(HCI_UP, &hdev->flags))
634 goto done;
636 /* Drop queues */
637 skb_queue_purge(&hdev->rx_q);
638 skb_queue_purge(&hdev->cmd_q);
640 hci_dev_lock_bh(hdev);
641 inquiry_cache_flush(hdev);
642 hci_conn_hash_flush(hdev);
643 hci_dev_unlock_bh(hdev);
645 if (hdev->flush)
646 hdev->flush(hdev);
648 atomic_set(&hdev->cmd_cnt, 1);
649 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
651 if (!test_bit(HCI_RAW, &hdev->flags))
652 ret = __hci_request(hdev, hci_reset_req, 0,
653 msecs_to_jiffies(HCI_INIT_TIMEOUT));
655 done:
656 tasklet_enable(&hdev->tx_task);
657 hci_req_unlock(hdev);
658 hci_dev_put(hdev);
659 return ret;
662 int hci_dev_reset_stat(__u16 dev)
664 struct hci_dev *hdev;
665 int ret = 0;
667 if (!(hdev = hci_dev_get(dev)))
668 return -ENODEV;
670 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
672 hci_dev_put(hdev);
674 return ret;
677 int hci_dev_cmd(unsigned int cmd, void __user *arg)
679 struct hci_dev *hdev;
680 struct hci_dev_req dr;
681 int err = 0;
683 if (copy_from_user(&dr, arg, sizeof(dr)))
684 return -EFAULT;
686 if (!(hdev = hci_dev_get(dr.dev_id)))
687 return -ENODEV;
689 switch (cmd) {
690 case HCISETAUTH:
691 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
692 msecs_to_jiffies(HCI_INIT_TIMEOUT));
693 break;
695 case HCISETENCRYPT:
696 if (!lmp_encrypt_capable(hdev)) {
697 err = -EOPNOTSUPP;
698 break;
701 if (!test_bit(HCI_AUTH, &hdev->flags)) {
702 /* Auth must be enabled first */
703 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
704 msecs_to_jiffies(HCI_INIT_TIMEOUT));
705 if (err)
706 break;
709 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
710 msecs_to_jiffies(HCI_INIT_TIMEOUT));
711 break;
713 case HCISETSCAN:
714 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
715 msecs_to_jiffies(HCI_INIT_TIMEOUT));
716 break;
718 case HCISETLINKPOL:
719 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
720 msecs_to_jiffies(HCI_INIT_TIMEOUT));
721 break;
723 case HCISETLINKMODE:
724 hdev->link_mode = ((__u16) dr.dev_opt) &
725 (HCI_LM_MASTER | HCI_LM_ACCEPT);
726 break;
728 case HCISETPTYPE:
729 hdev->pkt_type = (__u16) dr.dev_opt;
730 break;
732 case HCISETACLMTU:
733 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
734 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
735 break;
737 case HCISETSCOMTU:
738 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
739 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
740 break;
742 default:
743 err = -EINVAL;
744 break;
747 hci_dev_put(hdev);
748 return err;
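/*
 * These ioctls are driven from userspace over a raw HCI socket.  A rough,
 * illustrative sketch of a caller enabling page and inquiry scan on
 * device 0 via HCISETSCAN (hci_socket_fd is assumed to be a raw HCI
 * socket already opened by the caller; not part of this file):
 */
#if 0
struct hci_dev_req dr = {
	.dev_id  = 0,
	.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
};

ioctl(hci_socket_fd, HCISETSCAN, &dr);
#endif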
751 int hci_get_dev_list(void __user *arg)
753 struct hci_dev_list_req *dl;
754 struct hci_dev_req *dr;
755 struct list_head *p;
756 int n = 0, size, err;
757 __u16 dev_num;
759 if (get_user(dev_num, (__u16 __user *) arg))
760 return -EFAULT;
762 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
763 return -EINVAL;
765 size = sizeof(*dl) + dev_num * sizeof(*dr);
767 if (!(dl = kzalloc(size, GFP_KERNEL)))
768 return -ENOMEM;
770 dr = dl->dev_req;
772 read_lock_bh(&hci_dev_list_lock);
773 list_for_each(p, &hci_dev_list) {
774 struct hci_dev *hdev;
775 hdev = list_entry(p, struct hci_dev, list);
776 (dr + n)->dev_id = hdev->id;
777 (dr + n)->dev_opt = hdev->flags;
778 if (++n >= dev_num)
779 break;
781 read_unlock_bh(&hci_dev_list_lock);
783 dl->dev_num = n;
784 size = sizeof(*dl) + n * sizeof(*dr);
786 err = copy_to_user(arg, dl, size);
787 kfree(dl);
789 return err ? -EFAULT : 0;
792 int hci_get_dev_info(void __user *arg)
794 struct hci_dev *hdev;
795 struct hci_dev_info di;
796 int err = 0;
798 if (copy_from_user(&di, arg, sizeof(di)))
799 return -EFAULT;
801 if (!(hdev = hci_dev_get(di.dev_id)))
802 return -ENODEV;
804 strcpy(di.name, hdev->name);
805 di.bdaddr = hdev->bdaddr;
806 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
807 di.flags = hdev->flags;
808 di.pkt_type = hdev->pkt_type;
809 di.acl_mtu = hdev->acl_mtu;
810 di.acl_pkts = hdev->acl_pkts;
811 di.sco_mtu = hdev->sco_mtu;
812 di.sco_pkts = hdev->sco_pkts;
813 di.link_policy = hdev->link_policy;
814 di.link_mode = hdev->link_mode;
816 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
817 memcpy(&di.features, &hdev->features, sizeof(di.features));
819 if (copy_to_user(arg, &di, sizeof(di)))
820 err = -EFAULT;
822 hci_dev_put(hdev);
824 return err;
827 /* ---- Interface to HCI drivers ---- */
829 static int hci_rfkill_set_block(void *data, bool blocked)
831 struct hci_dev *hdev = data;
833 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
835 if (!blocked)
836 return 0;
838 hci_dev_do_close(hdev);
840 return 0;
843 static const struct rfkill_ops hci_rfkill_ops = {
844 .set_block = hci_rfkill_set_block,
847 /* Alloc HCI device */
848 struct hci_dev *hci_alloc_dev(void)
850 struct hci_dev *hdev;
852 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
853 if (!hdev)
854 return NULL;
856 skb_queue_head_init(&hdev->driver_init);
858 return hdev;
860 EXPORT_SYMBOL(hci_alloc_dev);
862 /* Free HCI device */
863 void hci_free_dev(struct hci_dev *hdev)
865 skb_queue_purge(&hdev->driver_init);
867 /* will free via device release */
868 put_device(&hdev->dev);
870 EXPORT_SYMBOL(hci_free_dev);
872 /* Register HCI device */
873 int hci_register_dev(struct hci_dev *hdev)
875 struct list_head *head = &hci_dev_list, *p;
876 int i, id = 0;
878 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
879 hdev->bus, hdev->owner);
881 if (!hdev->open || !hdev->close || !hdev->destruct)
882 return -EINVAL;
884 write_lock_bh(&hci_dev_list_lock);
886 /* Find first available device id */
887 list_for_each(p, &hci_dev_list) {
888 if (list_entry(p, struct hci_dev, list)->id != id)
889 break;
890 head = p; id++;
893 sprintf(hdev->name, "hci%d", id);
894 hdev->id = id;
895 list_add(&hdev->list, head);
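/* The list is kept sorted by id and the loop above stops at the first
 * gap, so e.g. with hci0 and hci2 already registered a new controller
 * gets id 1 and is inserted after hci0, becoming hci1. */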
897 atomic_set(&hdev->refcnt, 1);
898 spin_lock_init(&hdev->lock);
900 hdev->flags = 0;
901 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
902 hdev->esco_type = (ESCO_HV1);
903 hdev->link_mode = (HCI_LM_ACCEPT);
905 hdev->idle_timeout = 0;
906 hdev->sniff_max_interval = 800;
907 hdev->sniff_min_interval = 80;
909 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
910 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
911 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
913 skb_queue_head_init(&hdev->rx_q);
914 skb_queue_head_init(&hdev->cmd_q);
915 skb_queue_head_init(&hdev->raw_q);
917 for (i = 0; i < NUM_REASSEMBLY; i++)
918 hdev->reassembly[i] = NULL;
920 init_waitqueue_head(&hdev->req_wait_q);
921 mutex_init(&hdev->req_lock);
923 inquiry_cache_init(hdev);
925 hci_conn_hash_init(hdev);
927 INIT_LIST_HEAD(&hdev->blacklist);
929 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
931 atomic_set(&hdev->promisc, 0);
933 write_unlock_bh(&hci_dev_list_lock);
935 hdev->workqueue = create_singlethread_workqueue(hdev->name);
936 if (!hdev->workqueue)
937 goto nomem;
939 hci_register_sysfs(hdev);
941 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
942 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
943 if (hdev->rfkill) {
944 if (rfkill_register(hdev->rfkill) < 0) {
945 rfkill_destroy(hdev->rfkill);
946 hdev->rfkill = NULL;
950 hci_notify(hdev, HCI_DEV_REG);
952 return id;
954 nomem:
955 write_lock_bh(&hci_dev_list_lock);
956 list_del(&hdev->list);
957 write_unlock_bh(&hci_dev_list_lock);
959 return -ENOMEM;
961 EXPORT_SYMBOL(hci_register_dev);
963 /* Unregister HCI device */
964 int hci_unregister_dev(struct hci_dev *hdev)
966 int i;
968 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
970 write_lock_bh(&hci_dev_list_lock);
971 list_del(&hdev->list);
972 write_unlock_bh(&hci_dev_list_lock);
974 hci_dev_do_close(hdev);
976 for (i = 0; i < NUM_REASSEMBLY; i++)
977 kfree_skb(hdev->reassembly[i]);
979 hci_notify(hdev, HCI_DEV_UNREG);
981 if (hdev->rfkill) {
982 rfkill_unregister(hdev->rfkill);
983 rfkill_destroy(hdev->rfkill);
986 hci_unregister_sysfs(hdev);
988 destroy_workqueue(hdev->workqueue);
990 __hci_dev_put(hdev);
992 return 0;
994 EXPORT_SYMBOL(hci_unregister_dev);
996 /* Suspend HCI device */
997 int hci_suspend_dev(struct hci_dev *hdev)
999 hci_notify(hdev, HCI_DEV_SUSPEND);
1000 return 0;
1002 EXPORT_SYMBOL(hci_suspend_dev);
1004 /* Resume HCI device */
1005 int hci_resume_dev(struct hci_dev *hdev)
1007 hci_notify(hdev, HCI_DEV_RESUME);
1008 return 0;
1010 EXPORT_SYMBOL(hci_resume_dev);
1012 /* Receive frame from HCI drivers */
1013 int hci_recv_frame(struct sk_buff *skb)
1015 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1016 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1017 && !test_bit(HCI_INIT, &hdev->flags))) {
1018 kfree_skb(skb);
1019 return -ENXIO;
1022 /* Incoming skb */
1023 bt_cb(skb)->incoming = 1;
1025 /* Time stamp */
1026 __net_timestamp(skb);
1028 /* Queue frame for rx task */
1029 skb_queue_tail(&hdev->rx_q, skb);
1030 tasklet_schedule(&hdev->rx_task);
1032 return 0;
1034 EXPORT_SYMBOL(hci_recv_frame);
1036 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1037 int count, __u8 index, gfp_t gfp_mask)
1039 int len = 0;
1040 int hlen = 0;
1041 int remain = count;
1042 struct sk_buff *skb;
1043 struct bt_skb_cb *scb;
1045 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1046 index >= NUM_REASSEMBLY)
1047 return -EILSEQ;
1049 skb = hdev->reassembly[index];
1051 if (!skb) {
1052 switch (type) {
1053 case HCI_ACLDATA_PKT:
1054 len = HCI_MAX_FRAME_SIZE;
1055 hlen = HCI_ACL_HDR_SIZE;
1056 break;
1057 case HCI_EVENT_PKT:
1058 len = HCI_MAX_EVENT_SIZE;
1059 hlen = HCI_EVENT_HDR_SIZE;
1060 break;
1061 case HCI_SCODATA_PKT:
1062 len = HCI_MAX_SCO_SIZE;
1063 hlen = HCI_SCO_HDR_SIZE;
1064 break;
1067 skb = bt_skb_alloc(len, gfp_mask);
1068 if (!skb)
1069 return -ENOMEM;
1071 scb = (void *) skb->cb;
1072 scb->expect = hlen;
1073 scb->pkt_type = type;
1075 skb->dev = (void *) hdev;
1076 hdev->reassembly[index] = skb;
1079 while (count) {
1080 scb = (void *) skb->cb;
1081 len = min(scb->expect, (__u16)count);
1083 memcpy(skb_put(skb, len), data, len);
1085 count -= len;
1086 data += len;
1087 scb->expect -= len;
1088 remain = count;
1090 switch (type) {
1091 case HCI_EVENT_PKT:
1092 if (skb->len == HCI_EVENT_HDR_SIZE) {
1093 struct hci_event_hdr *h = hci_event_hdr(skb);
1094 scb->expect = h->plen;
1096 if (skb_tailroom(skb) < scb->expect) {
1097 kfree_skb(skb);
1098 hdev->reassembly[index] = NULL;
1099 return -ENOMEM;
1102 break;
1104 case HCI_ACLDATA_PKT:
1105 if (skb->len == HCI_ACL_HDR_SIZE) {
1106 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1107 scb->expect = __le16_to_cpu(h->dlen);
1109 if (skb_tailroom(skb) < scb->expect) {
1110 kfree_skb(skb);
1111 hdev->reassembly[index] = NULL;
1112 return -ENOMEM;
1115 break;
1117 case HCI_SCODATA_PKT:
1118 if (skb->len == HCI_SCO_HDR_SIZE) {
1119 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1120 scb->expect = h->dlen;
1122 if (skb_tailroom(skb) < scb->expect) {
1123 kfree_skb(skb);
1124 hdev->reassembly[index] = NULL;
1125 return -ENOMEM;
1128 break;
1131 if (scb->expect == 0) {
1132 /* Complete frame */
1134 bt_cb(skb)->pkt_type = type;
1135 hci_recv_frame(skb);
1137 hdev->reassembly[index] = NULL;
1138 return remain;
1142 return remain;
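/*
 * hci_reassembly() consumes input until the current packet is complete
 * and returns the number of input bytes it did not consume.  scb->expect
 * tracks what is still needed: first the packet header, then the payload
 * length taken from that header.  Worked example for an ACL packet with
 * a 10 byte payload (4 byte header, dlen = 10) arriving in two 7 byte
 * chunks: the first call copies 4 + 3 bytes and returns 0, the second
 * copies the remaining 7 bytes, sees expect == 0, hands the frame to
 * hci_recv_frame() and again returns 0.
 */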
1145 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1147 int rem = 0;
1149 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1150 return -EILSEQ;
1152 while (count) {
1153 rem = hci_reassembly(hdev, type, data, count,
1154 type - 1, GFP_ATOMIC);
1155 if (rem < 0)
1156 return rem;
1158 data += (count - rem);
1159 count = rem;
1162 return rem;
1164 EXPORT_SYMBOL(hci_recv_fragment);
1166 #define STREAM_REASSEMBLY 0
1168 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1170 int type;
1171 int rem = 0;
1173 while (count) {
1174 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1176 if (!skb) {
1177 struct { char type; } *pkt;
1179 /* Start of the frame */
1180 pkt = data;
1181 type = pkt->type;
1183 data++;
1184 count--;
1185 } else
1186 type = bt_cb(skb)->pkt_type;
1188 rem = hci_reassembly(hdev, type, data,
1189 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1190 if (rem < 0)
1191 return rem;
1193 data += (count - rem);
1194 count = rem;
1197 return rem;
1199 EXPORT_SYMBOL(hci_recv_stream_fragment);
1201 /* ---- Interface to upper protocols ---- */
1203 /* Register/Unregister protocols.
1204 * hci_task_lock is used to ensure that no tasks are running. */
1205 int hci_register_proto(struct hci_proto *hp)
1207 int err = 0;
1209 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1211 if (hp->id >= HCI_MAX_PROTO)
1212 return -EINVAL;
1214 write_lock_bh(&hci_task_lock);
1216 if (!hci_proto[hp->id])
1217 hci_proto[hp->id] = hp;
1218 else
1219 err = -EEXIST;
1221 write_unlock_bh(&hci_task_lock);
1223 return err;
1225 EXPORT_SYMBOL(hci_register_proto);
1227 int hci_unregister_proto(struct hci_proto *hp)
1229 int err = 0;
1231 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1233 if (hp->id >= HCI_MAX_PROTO)
1234 return -EINVAL;
1236 write_lock_bh(&hci_task_lock);
1238 if (hci_proto[hp->id])
1239 hci_proto[hp->id] = NULL;
1240 else
1241 err = -ENOENT;
1243 write_unlock_bh(&hci_task_lock);
1245 return err;
1247 EXPORT_SYMBOL(hci_unregister_proto);
1249 int hci_register_cb(struct hci_cb *cb)
1251 BT_DBG("%p name %s", cb, cb->name);
1253 write_lock_bh(&hci_cb_list_lock);
1254 list_add(&cb->list, &hci_cb_list);
1255 write_unlock_bh(&hci_cb_list_lock);
1257 return 0;
1259 EXPORT_SYMBOL(hci_register_cb);
1261 int hci_unregister_cb(struct hci_cb *cb)
1263 BT_DBG("%p name %s", cb, cb->name);
1265 write_lock_bh(&hci_cb_list_lock);
1266 list_del(&cb->list);
1267 write_unlock_bh(&hci_cb_list_lock);
1269 return 0;
1271 EXPORT_SYMBOL(hci_unregister_cb);
1273 static int hci_send_frame(struct sk_buff *skb)
1275 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1277 if (!hdev) {
1278 kfree_skb(skb);
1279 return -ENODEV;
1282 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1284 if (atomic_read(&hdev->promisc)) {
1285 /* Time stamp */
1286 __net_timestamp(skb);
1288 hci_send_to_sock(hdev, skb);
1291 /* Get rid of skb owner, prior to sending to the driver. */
1292 skb_orphan(skb);
1294 return hdev->send(skb);
1297 /* Send HCI command */
1298 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1300 int len = HCI_COMMAND_HDR_SIZE + plen;
1301 struct hci_command_hdr *hdr;
1302 struct sk_buff *skb;
1304 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1306 skb = bt_skb_alloc(len, GFP_ATOMIC);
1307 if (!skb) {
1308 BT_ERR("%s no memory for command", hdev->name);
1309 return -ENOMEM;
1312 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1313 hdr->opcode = cpu_to_le16(opcode);
1314 hdr->plen = plen;
1316 if (plen)
1317 memcpy(skb_put(skb, plen), param, plen);
1319 BT_DBG("skb len %d", skb->len);
1321 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1322 skb->dev = (void *) hdev;
1324 skb_queue_tail(&hdev->cmd_q, skb);
1325 tasklet_schedule(&hdev->cmd_task);
1327 return 0;
1330 /* Get data from the previously sent command */
1331 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1333 struct hci_command_hdr *hdr;
1335 if (!hdev->sent_cmd)
1336 return NULL;
1338 hdr = (void *) hdev->sent_cmd->data;
1340 if (hdr->opcode != cpu_to_le16(opcode))
1341 return NULL;
1343 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1345 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1348 /* Send ACL data */
1349 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1351 struct hci_acl_hdr *hdr;
1352 int len = skb->len;
1354 skb_push(skb, HCI_ACL_HDR_SIZE);
1355 skb_reset_transport_header(skb);
1356 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1357 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1358 hdr->dlen = cpu_to_le16(len);
1361 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1363 struct hci_dev *hdev = conn->hdev;
1364 struct sk_buff *list;
1366 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1368 skb->dev = (void *) hdev;
1369 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1370 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1372 if (!(list = skb_shinfo(skb)->frag_list)) {
1373 /* Non fragmented */
1374 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1376 skb_queue_tail(&conn->data_q, skb);
1377 } else {
1378 /* Fragmented */
1379 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1381 skb_shinfo(skb)->frag_list = NULL;
1383 /* Queue all fragments atomically */
1384 spin_lock_bh(&conn->data_q.lock);
1386 __skb_queue_tail(&conn->data_q, skb);
1387 do {
1388 skb = list; list = list->next;
1390 skb->dev = (void *) hdev;
1391 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1392 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1394 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1396 __skb_queue_tail(&conn->data_q, skb);
1397 } while (list);
1399 spin_unlock_bh(&conn->data_q.lock);
1402 tasklet_schedule(&hdev->tx_task);
1404 EXPORT_SYMBOL(hci_send_acl);
1406 /* Send SCO data */
1407 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1409 struct hci_dev *hdev = conn->hdev;
1410 struct hci_sco_hdr hdr;
1412 BT_DBG("%s len %d", hdev->name, skb->len);
1414 hdr.handle = cpu_to_le16(conn->handle);
1415 hdr.dlen = skb->len;
1417 skb_push(skb, HCI_SCO_HDR_SIZE);
1418 skb_reset_transport_header(skb);
1419 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1421 skb->dev = (void *) hdev;
1422 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1424 skb_queue_tail(&conn->data_q, skb);
1425 tasklet_schedule(&hdev->tx_task);
1427 EXPORT_SYMBOL(hci_send_sco);
1429 /* ---- HCI TX task (outgoing data) ---- */
1431 /* HCI Connection scheduler */
1432 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1434 struct hci_conn_hash *h = &hdev->conn_hash;
1435 struct hci_conn *conn = NULL;
1436 int num = 0, min = ~0;
1437 struct list_head *p;
1439 /* We don't have to lock device here. Connections are always
1440 * added and removed with TX task disabled. */
1441 list_for_each(p, &h->list) {
1442 struct hci_conn *c;
1443 c = list_entry(p, struct hci_conn, list);
1445 if (c->type != type || skb_queue_empty(&c->data_q))
1446 continue;
1448 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1449 continue;
1451 num++;
1453 if (c->sent < min) {
1454 min = c->sent;
1455 conn = c;
1459 if (conn) {
1460 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1461 int q = cnt / num;
1462 *quote = q ? q : 1;
1463 } else
1464 *quote = 0;
1466 BT_DBG("conn %p quote %d", conn, *quote);
1467 return conn;
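/*
 * hci_low_sent() is a simple fair scheduler: among connections of the
 * requested type with queued data it picks the one with the fewest
 * packets in flight and grants it a quote of available controller
 * buffers divided by the number of busy connections (minimum 1).
 * E.g. with hdev->acl_cnt == 5 and two busy ACL links, each link gets
 * a quote of 2 per scheduling pass.
 */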
1470 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1472 struct hci_conn_hash *h = &hdev->conn_hash;
1473 struct list_head *p;
1474 struct hci_conn *c;
1476 BT_ERR("%s ACL tx timeout", hdev->name);
1478 /* Kill stalled connections */
1479 list_for_each(p, &h->list) {
1480 c = list_entry(p, struct hci_conn, list);
1481 if (c->type == ACL_LINK && c->sent) {
1482 BT_ERR("%s killing stalled ACL connection %s",
1483 hdev->name, batostr(&c->dst));
1484 hci_acl_disconn(c, 0x13);
1489 static inline void hci_sched_acl(struct hci_dev *hdev)
1491 struct hci_conn *conn;
1492 struct sk_buff *skb;
1493 int quote;
1495 BT_DBG("%s", hdev->name);
1497 if (!test_bit(HCI_RAW, &hdev->flags)) {
1498 /* ACL tx timeout must be longer than maximum
1499 * link supervision timeout (40.9 seconds) */
1500 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1501 hci_acl_tx_to(hdev);
1504 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1505 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1506 BT_DBG("skb %p len %d", skb, skb->len);
1508 hci_conn_enter_active_mode(conn);
1510 hci_send_frame(skb);
1511 hdev->acl_last_tx = jiffies;
1513 hdev->acl_cnt--;
1514 conn->sent++;
1519 /* Schedule SCO */
1520 static inline void hci_sched_sco(struct hci_dev *hdev)
1522 struct hci_conn *conn;
1523 struct sk_buff *skb;
1524 int quote;
1526 BT_DBG("%s", hdev->name);
1528 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1529 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1530 BT_DBG("skb %p len %d", skb, skb->len);
1531 hci_send_frame(skb);
1533 conn->sent++;
1534 if (conn->sent == ~0)
1535 conn->sent = 0;
1540 static inline void hci_sched_esco(struct hci_dev *hdev)
1542 struct hci_conn *conn;
1543 struct sk_buff *skb;
1544 int quote;
1546 BT_DBG("%s", hdev->name);
1548 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1549 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1550 BT_DBG("skb %p len %d", skb, skb->len);
1551 hci_send_frame(skb);
1553 conn->sent++;
1554 if (conn->sent == ~0)
1555 conn->sent = 0;
1560 static void hci_tx_task(unsigned long arg)
1562 struct hci_dev *hdev = (struct hci_dev *) arg;
1563 struct sk_buff *skb;
1565 read_lock(&hci_task_lock);
1567 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1569 /* Schedule queues and send stuff to HCI driver */
1571 hci_sched_acl(hdev);
1573 hci_sched_sco(hdev);
1575 hci_sched_esco(hdev);
1577 /* Send next queued raw (unknown type) packet */
1578 while ((skb = skb_dequeue(&hdev->raw_q)))
1579 hci_send_frame(skb);
1581 read_unlock(&hci_task_lock);
1584 /* ----- HCI RX task (incoming data processing) ----- */
1586 /* ACL data packet */
1587 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1589 struct hci_acl_hdr *hdr = (void *) skb->data;
1590 struct hci_conn *conn;
1591 __u16 handle, flags;
1593 skb_pull(skb, HCI_ACL_HDR_SIZE);
1595 handle = __le16_to_cpu(hdr->handle);
1596 flags = hci_flags(handle);
1597 handle = hci_handle(handle);
1599 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1601 hdev->stat.acl_rx++;
1603 hci_dev_lock(hdev);
1604 conn = hci_conn_hash_lookup_handle(hdev, handle);
1605 hci_dev_unlock(hdev);
1607 if (conn) {
1608 register struct hci_proto *hp;
1610 hci_conn_enter_active_mode(conn);
1612 /* Send to upper protocol */
1613 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1614 hp->recv_acldata(conn, skb, flags);
1615 return;
1617 } else {
1618 BT_ERR("%s ACL packet for unknown connection handle %d",
1619 hdev->name, handle);
1622 kfree_skb(skb);
1625 /* SCO data packet */
1626 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1628 struct hci_sco_hdr *hdr = (void *) skb->data;
1629 struct hci_conn *conn;
1630 __u16 handle;
1632 skb_pull(skb, HCI_SCO_HDR_SIZE);
1634 handle = __le16_to_cpu(hdr->handle);
1636 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1638 hdev->stat.sco_rx++;
1640 hci_dev_lock(hdev);
1641 conn = hci_conn_hash_lookup_handle(hdev, handle);
1642 hci_dev_unlock(hdev);
1644 if (conn) {
1645 register struct hci_proto *hp;
1647 /* Send to upper protocol */
1648 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1649 hp->recv_scodata(conn, skb);
1650 return;
1652 } else {
1653 BT_ERR("%s SCO packet for unknown connection handle %d",
1654 hdev->name, handle);
1657 kfree_skb(skb);
1660 static void hci_rx_task(unsigned long arg)
1662 struct hci_dev *hdev = (struct hci_dev *) arg;
1663 struct sk_buff *skb;
1665 BT_DBG("%s", hdev->name);
1667 read_lock(&hci_task_lock);
1669 while ((skb = skb_dequeue(&hdev->rx_q))) {
1670 if (atomic_read(&hdev->promisc)) {
1671 /* Send copy to the sockets */
1672 hci_send_to_sock(hdev, skb);
1675 if (test_bit(HCI_RAW, &hdev->flags)) {
1676 kfree_skb(skb);
1677 continue;
1680 if (test_bit(HCI_INIT, &hdev->flags)) {
1681 /* Don't process data packets in this state. */
1682 switch (bt_cb(skb)->pkt_type) {
1683 case HCI_ACLDATA_PKT:
1684 case HCI_SCODATA_PKT:
1685 kfree_skb(skb);
1686 continue;
1690 /* Process frame */
1691 switch (bt_cb(skb)->pkt_type) {
1692 case HCI_EVENT_PKT:
1693 hci_event_packet(hdev, skb);
1694 break;
1696 case HCI_ACLDATA_PKT:
1697 BT_DBG("%s ACL data packet", hdev->name);
1698 hci_acldata_packet(hdev, skb);
1699 break;
1701 case HCI_SCODATA_PKT:
1702 BT_DBG("%s SCO data packet", hdev->name);
1703 hci_scodata_packet(hdev, skb);
1704 break;
1706 default:
1707 kfree_skb(skb);
1708 break;
1712 read_unlock(&hci_task_lock);
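/*
 * hci_cmd_task() enforces HCI command flow control: cmd_cnt is a credit
 * that starts at 1, is consumed when a command is handed to the driver,
 * and is replenished by the event handler (hci_event.c) once the
 * controller reports it can accept another command.  The check against
 * cmd_last_tx + HZ below is a watchdog that restores the credit if the
 * controller stops answering, so the command queue cannot stall forever.
 */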
1715 static void hci_cmd_task(unsigned long arg)
1717 struct hci_dev *hdev = (struct hci_dev *) arg;
1718 struct sk_buff *skb;
1720 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1722 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1723 BT_ERR("%s command tx timeout", hdev->name);
1724 atomic_set(&hdev->cmd_cnt, 1);
1727 /* Send queued commands */
1728 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1729 kfree_skb(hdev->sent_cmd);
1731 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1732 atomic_dec(&hdev->cmd_cnt);
1733 hci_send_frame(skb);
1734 hdev->cmd_last_tx = jiffies;
1735 } else {
1736 skb_queue_head(&hdev->cmd_q, skb);
1737 tasklet_schedule(&hdev->cmd_task);