vmscan: avoid setting zone congested if no page dirty
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / hci_core.c
blobbc2a052e518b37518eaf9e128ce418d195bd91e1
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <net/sock.h>
46 #include <asm/system.h>
47 #include <asm/uaccess.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
/* Forward declarations for the three per-device tasklets and the
 * notifier helper defined later in this file. */
53 static void hci_cmd_task(unsigned long arg);
54 static void hci_rx_task(unsigned long arg);
55 static void hci_tx_task(unsigned long arg);
56 static void hci_notify(struct hci_dev *hdev, int event);
/* Protects hci_proto[] registration against the rx/tx/cmd tasklets. */
58 static DEFINE_RWLOCK(hci_task_lock);
60 /* HCI device list */
61 LIST_HEAD(hci_dev_list);
62 DEFINE_RWLOCK(hci_dev_list_lock);
64 /* HCI callback list */
65 LIST_HEAD(hci_cb_list);
66 DEFINE_RWLOCK(hci_cb_list_lock);
68 /* HCI protocols */
69 #define HCI_MAX_PROTO 2
70 struct hci_proto *hci_proto[HCI_MAX_PROTO];
72 /* HCI notifiers list */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
75 /* ---- HCI notifications ---- */
77 int hci_register_notifier(struct notifier_block *nb)
79 return atomic_notifier_chain_register(&hci_notifier, nb);
82 int hci_unregister_notifier(struct notifier_block *nb)
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 static void hci_notify(struct hci_dev *hdev, int event)
89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
92 /* ---- HCI requests ---- */
94 void hci_req_complete(struct hci_dev *hdev, int result)
96 BT_DBG("%s result 0x%2.2x", hdev->name, result);
98 if (hdev->req_status == HCI_REQ_PEND) {
99 hdev->req_result = result;
100 hdev->req_status = HCI_REQ_DONE;
101 wake_up_interruptible(&hdev->req_wait_q);
105 static void hci_req_cancel(struct hci_dev *hdev, int err)
107 BT_DBG("%s err 0x%2.2x", hdev->name, err);
109 if (hdev->req_status == HCI_REQ_PEND) {
110 hdev->req_result = err;
111 hdev->req_status = HCI_REQ_CANCELED;
112 wake_up_interruptible(&hdev->req_wait_q);
116 /* Execute request and wait for completion. */
117 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
118 unsigned long opt, __u32 timeout)
120 DECLARE_WAITQUEUE(wait, current);
121 int err = 0;
123 BT_DBG("%s start", hdev->name);
125 hdev->req_status = HCI_REQ_PEND;
127 add_wait_queue(&hdev->req_wait_q, &wait);
128 set_current_state(TASK_INTERRUPTIBLE);
130 req(hdev, opt);
131 schedule_timeout(timeout);
133 remove_wait_queue(&hdev->req_wait_q, &wait);
135 if (signal_pending(current))
136 return -EINTR;
138 switch (hdev->req_status) {
139 case HCI_REQ_DONE:
140 err = -bt_err(hdev->req_result);
141 break;
143 case HCI_REQ_CANCELED:
144 err = -hdev->req_result;
145 break;
147 default:
148 err = -ETIMEDOUT;
149 break;
152 hdev->req_status = hdev->req_result = 0;
154 BT_DBG("%s end: err %d", hdev->name, err);
156 return err;
159 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
160 unsigned long opt, __u32 timeout)
162 int ret;
164 if (!test_bit(HCI_UP, &hdev->flags))
165 return -ENETDOWN;
167 /* Serialize all requests */
168 hci_req_lock(hdev);
169 ret = __hci_request(hdev, req, opt, timeout);
170 hci_req_unlock(hdev);
172 return ret;
175 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
177 BT_DBG("%s %ld", hdev->name, opt);
179 /* Reset device */
180 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
183 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
185 struct sk_buff *skb;
186 __le16 param;
187 __u8 flt_type;
189 BT_DBG("%s %ld", hdev->name, opt);
191 /* Driver initialization */
193 /* Special commands */
194 while ((skb = skb_dequeue(&hdev->driver_init))) {
195 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
196 skb->dev = (void *) hdev;
198 skb_queue_tail(&hdev->cmd_q, skb);
199 tasklet_schedule(&hdev->cmd_task);
201 skb_queue_purge(&hdev->driver_init);
203 /* Mandatory initialization */
205 /* Reset */
206 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
209 /* Read Local Supported Features */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
212 /* Read Local Version */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
216 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
218 #if 0
219 /* Host buffer size */
221 struct hci_cp_host_buffer_size cp;
222 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
223 cp.sco_mtu = HCI_MAX_SCO_SIZE;
224 cp.acl_max_pkt = cpu_to_le16(0xffff);
225 cp.sco_max_pkt = cpu_to_le16(0xffff);
226 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
228 #endif
230 /* Read BD Address */
231 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
233 /* Read Class of Device */
234 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
236 /* Read Local Name */
237 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
239 /* Read Voice Setting */
240 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
242 /* Optional initialization */
244 /* Clear Event Filters */
245 flt_type = HCI_FLT_CLEAR_ALL;
246 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
248 /* Page timeout ~20 secs */
249 param = cpu_to_le16(0x8000);
250 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
252 /* Connection accept timeout ~20 secs */
253 param = cpu_to_le16(0x7d00);
254 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
257 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
259 __u8 scan = opt;
261 BT_DBG("%s %x", hdev->name, scan);
263 /* Inquiry and Page scans */
264 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
267 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
269 __u8 auth = opt;
271 BT_DBG("%s %x", hdev->name, auth);
273 /* Authentication */
274 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
277 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
279 __u8 encrypt = opt;
281 BT_DBG("%s %x", hdev->name, encrypt);
283 /* Encryption */
284 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
287 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
289 __le16 policy = cpu_to_le16(opt);
291 BT_DBG("%s %x", hdev->name, policy);
293 /* Default link policy */
294 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
297 /* Get HCI device by index.
298 * Device is held on return. */
299 struct hci_dev *hci_dev_get(int index)
301 struct hci_dev *hdev = NULL;
302 struct list_head *p;
304 BT_DBG("%d", index);
306 if (index < 0)
307 return NULL;
309 read_lock(&hci_dev_list_lock);
310 list_for_each(p, &hci_dev_list) {
311 struct hci_dev *d = list_entry(p, struct hci_dev, list);
312 if (d->id == index) {
313 hdev = hci_dev_hold(d);
314 break;
317 read_unlock(&hci_dev_list_lock);
318 return hdev;
321 /* ---- Inquiry support ---- */
322 static void inquiry_cache_flush(struct hci_dev *hdev)
324 struct inquiry_cache *cache = &hdev->inq_cache;
325 struct inquiry_entry *next = cache->list, *e;
327 BT_DBG("cache %p", cache);
329 cache->list = NULL;
330 while ((e = next)) {
331 next = e->next;
332 kfree(e);
336 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
338 struct inquiry_cache *cache = &hdev->inq_cache;
339 struct inquiry_entry *e;
341 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
343 for (e = cache->list; e; e = e->next)
344 if (!bacmp(&e->data.bdaddr, bdaddr))
345 break;
346 return e;
349 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
351 struct inquiry_cache *cache = &hdev->inq_cache;
352 struct inquiry_entry *e;
354 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
356 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
357 /* Entry not in the cache. Add new one. */
358 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
359 return;
360 e->next = cache->list;
361 cache->list = e;
364 memcpy(&e->data, data, sizeof(*data));
365 e->timestamp = jiffies;
366 cache->timestamp = jiffies;
369 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
371 struct inquiry_cache *cache = &hdev->inq_cache;
372 struct inquiry_info *info = (struct inquiry_info *) buf;
373 struct inquiry_entry *e;
374 int copied = 0;
376 for (e = cache->list; e && copied < num; e = e->next, copied++) {
377 struct inquiry_data *data = &e->data;
378 bacpy(&info->bdaddr, &data->bdaddr);
379 info->pscan_rep_mode = data->pscan_rep_mode;
380 info->pscan_period_mode = data->pscan_period_mode;
381 info->pscan_mode = data->pscan_mode;
382 memcpy(info->dev_class, data->dev_class, 3);
383 info->clock_offset = data->clock_offset;
384 info++;
387 BT_DBG("cache %p, copied %d", cache, copied);
388 return copied;
391 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
393 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
394 struct hci_cp_inquiry cp;
396 BT_DBG("%s", hdev->name);
398 if (test_bit(HCI_INQUIRY, &hdev->flags))
399 return;
401 /* Start Inquiry */
402 memcpy(&cp.lap, &ir->lap, 3);
403 cp.length = ir->length;
404 cp.num_rsp = ir->num_rsp;
405 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
408 int hci_inquiry(void __user *arg)
410 __u8 __user *ptr = arg;
411 struct hci_inquiry_req ir;
412 struct hci_dev *hdev;
413 int err = 0, do_inquiry = 0, max_rsp;
414 long timeo;
415 __u8 *buf;
417 if (copy_from_user(&ir, ptr, sizeof(ir)))
418 return -EFAULT;
420 if (!(hdev = hci_dev_get(ir.dev_id)))
421 return -ENODEV;
423 hci_dev_lock_bh(hdev);
424 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
425 inquiry_cache_empty(hdev) ||
426 ir.flags & IREQ_CACHE_FLUSH) {
427 inquiry_cache_flush(hdev);
428 do_inquiry = 1;
430 hci_dev_unlock_bh(hdev);
432 timeo = ir.length * msecs_to_jiffies(2000);
433 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
434 goto done;
436 /* for unlimited number of responses we will use buffer with 255 entries */
437 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
439 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
440 * copy it to the user space.
442 if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
443 err = -ENOMEM;
444 goto done;
447 hci_dev_lock_bh(hdev);
448 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
449 hci_dev_unlock_bh(hdev);
451 BT_DBG("num_rsp %d", ir.num_rsp);
453 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
454 ptr += sizeof(ir);
455 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
456 ir.num_rsp))
457 err = -EFAULT;
458 } else
459 err = -EFAULT;
461 kfree(buf);
463 done:
464 hci_dev_put(hdev);
465 return err;
468 /* ---- HCI ioctl helpers ---- */
470 int hci_dev_open(__u16 dev)
472 struct hci_dev *hdev;
473 int ret = 0;
475 if (!(hdev = hci_dev_get(dev)))
476 return -ENODEV;
478 BT_DBG("%s %p", hdev->name, hdev);
480 hci_req_lock(hdev);
482 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
483 ret = -ERFKILL;
484 goto done;
487 if (test_bit(HCI_UP, &hdev->flags)) {
488 ret = -EALREADY;
489 goto done;
492 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
493 set_bit(HCI_RAW, &hdev->flags);
495 /* Treat all non BR/EDR controllers as raw devices for now */
496 if (hdev->dev_type != HCI_BREDR)
497 set_bit(HCI_RAW, &hdev->flags);
499 if (hdev->open(hdev)) {
500 ret = -EIO;
501 goto done;
504 if (!test_bit(HCI_RAW, &hdev->flags)) {
505 atomic_set(&hdev->cmd_cnt, 1);
506 set_bit(HCI_INIT, &hdev->flags);
508 //__hci_request(hdev, hci_reset_req, 0, HZ);
509 ret = __hci_request(hdev, hci_init_req, 0,
510 msecs_to_jiffies(HCI_INIT_TIMEOUT));
512 clear_bit(HCI_INIT, &hdev->flags);
515 if (!ret) {
516 hci_dev_hold(hdev);
517 set_bit(HCI_UP, &hdev->flags);
518 hci_notify(hdev, HCI_DEV_UP);
519 } else {
520 /* Init failed, cleanup */
521 tasklet_kill(&hdev->rx_task);
522 tasklet_kill(&hdev->tx_task);
523 tasklet_kill(&hdev->cmd_task);
525 skb_queue_purge(&hdev->cmd_q);
526 skb_queue_purge(&hdev->rx_q);
528 if (hdev->flush)
529 hdev->flush(hdev);
531 if (hdev->sent_cmd) {
532 kfree_skb(hdev->sent_cmd);
533 hdev->sent_cmd = NULL;
536 hdev->close(hdev);
537 hdev->flags = 0;
540 done:
541 hci_req_unlock(hdev);
542 hci_dev_put(hdev);
543 return ret;
546 static int hci_dev_do_close(struct hci_dev *hdev)
548 BT_DBG("%s %p", hdev->name, hdev);
550 hci_req_cancel(hdev, ENODEV);
551 hci_req_lock(hdev);
553 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
554 hci_req_unlock(hdev);
555 return 0;
558 /* Kill RX and TX tasks */
559 tasklet_kill(&hdev->rx_task);
560 tasklet_kill(&hdev->tx_task);
562 hci_dev_lock_bh(hdev);
563 inquiry_cache_flush(hdev);
564 hci_conn_hash_flush(hdev);
565 hci_dev_unlock_bh(hdev);
567 hci_notify(hdev, HCI_DEV_DOWN);
569 if (hdev->flush)
570 hdev->flush(hdev);
572 /* Reset device */
573 skb_queue_purge(&hdev->cmd_q);
574 atomic_set(&hdev->cmd_cnt, 1);
575 if (!test_bit(HCI_RAW, &hdev->flags)) {
576 set_bit(HCI_INIT, &hdev->flags);
577 __hci_request(hdev, hci_reset_req, 0,
578 msecs_to_jiffies(250));
579 clear_bit(HCI_INIT, &hdev->flags);
582 /* Kill cmd task */
583 tasklet_kill(&hdev->cmd_task);
585 /* Drop queues */
586 skb_queue_purge(&hdev->rx_q);
587 skb_queue_purge(&hdev->cmd_q);
588 skb_queue_purge(&hdev->raw_q);
590 /* Drop last sent command */
591 if (hdev->sent_cmd) {
592 kfree_skb(hdev->sent_cmd);
593 hdev->sent_cmd = NULL;
596 /* After this point our queues are empty
597 * and no tasks are scheduled. */
598 hdev->close(hdev);
600 /* Clear flags */
601 hdev->flags = 0;
603 hci_req_unlock(hdev);
605 hci_dev_put(hdev);
606 return 0;
609 int hci_dev_close(__u16 dev)
611 struct hci_dev *hdev;
612 int err;
614 if (!(hdev = hci_dev_get(dev)))
615 return -ENODEV;
616 err = hci_dev_do_close(hdev);
617 hci_dev_put(hdev);
618 return err;
621 int hci_dev_reset(__u16 dev)
623 struct hci_dev *hdev;
624 int ret = 0;
626 if (!(hdev = hci_dev_get(dev)))
627 return -ENODEV;
629 hci_req_lock(hdev);
630 tasklet_disable(&hdev->tx_task);
632 if (!test_bit(HCI_UP, &hdev->flags))
633 goto done;
635 /* Drop queues */
636 skb_queue_purge(&hdev->rx_q);
637 skb_queue_purge(&hdev->cmd_q);
639 hci_dev_lock_bh(hdev);
640 inquiry_cache_flush(hdev);
641 hci_conn_hash_flush(hdev);
642 hci_dev_unlock_bh(hdev);
644 if (hdev->flush)
645 hdev->flush(hdev);
647 atomic_set(&hdev->cmd_cnt, 1);
648 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
650 if (!test_bit(HCI_RAW, &hdev->flags))
651 ret = __hci_request(hdev, hci_reset_req, 0,
652 msecs_to_jiffies(HCI_INIT_TIMEOUT));
654 done:
655 tasklet_enable(&hdev->tx_task);
656 hci_req_unlock(hdev);
657 hci_dev_put(hdev);
658 return ret;
661 int hci_dev_reset_stat(__u16 dev)
663 struct hci_dev *hdev;
664 int ret = 0;
666 if (!(hdev = hci_dev_get(dev)))
667 return -ENODEV;
669 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
671 hci_dev_put(hdev);
673 return ret;
676 int hci_dev_cmd(unsigned int cmd, void __user *arg)
678 struct hci_dev *hdev;
679 struct hci_dev_req dr;
680 int err = 0;
682 if (copy_from_user(&dr, arg, sizeof(dr)))
683 return -EFAULT;
685 if (!(hdev = hci_dev_get(dr.dev_id)))
686 return -ENODEV;
688 switch (cmd) {
689 case HCISETAUTH:
690 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
691 msecs_to_jiffies(HCI_INIT_TIMEOUT));
692 break;
694 case HCISETENCRYPT:
695 if (!lmp_encrypt_capable(hdev)) {
696 err = -EOPNOTSUPP;
697 break;
700 if (!test_bit(HCI_AUTH, &hdev->flags)) {
701 /* Auth must be enabled first */
702 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
703 msecs_to_jiffies(HCI_INIT_TIMEOUT));
704 if (err)
705 break;
708 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
709 msecs_to_jiffies(HCI_INIT_TIMEOUT));
710 break;
712 case HCISETSCAN:
713 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
714 msecs_to_jiffies(HCI_INIT_TIMEOUT));
715 break;
717 case HCISETLINKPOL:
718 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
719 msecs_to_jiffies(HCI_INIT_TIMEOUT));
720 break;
722 case HCISETLINKMODE:
723 hdev->link_mode = ((__u16) dr.dev_opt) &
724 (HCI_LM_MASTER | HCI_LM_ACCEPT);
725 break;
727 case HCISETPTYPE:
728 hdev->pkt_type = (__u16) dr.dev_opt;
729 break;
731 case HCISETACLMTU:
732 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
733 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
734 break;
736 case HCISETSCOMTU:
737 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
738 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
739 break;
741 default:
742 err = -EINVAL;
743 break;
746 hci_dev_put(hdev);
747 return err;
750 int hci_get_dev_list(void __user *arg)
752 struct hci_dev_list_req *dl;
753 struct hci_dev_req *dr;
754 struct list_head *p;
755 int n = 0, size, err;
756 __u16 dev_num;
758 if (get_user(dev_num, (__u16 __user *) arg))
759 return -EFAULT;
761 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
762 return -EINVAL;
764 size = sizeof(*dl) + dev_num * sizeof(*dr);
766 if (!(dl = kzalloc(size, GFP_KERNEL)))
767 return -ENOMEM;
769 dr = dl->dev_req;
771 read_lock_bh(&hci_dev_list_lock);
772 list_for_each(p, &hci_dev_list) {
773 struct hci_dev *hdev;
774 hdev = list_entry(p, struct hci_dev, list);
775 (dr + n)->dev_id = hdev->id;
776 (dr + n)->dev_opt = hdev->flags;
777 if (++n >= dev_num)
778 break;
780 read_unlock_bh(&hci_dev_list_lock);
782 dl->dev_num = n;
783 size = sizeof(*dl) + n * sizeof(*dr);
785 err = copy_to_user(arg, dl, size);
786 kfree(dl);
788 return err ? -EFAULT : 0;
791 int hci_get_dev_info(void __user *arg)
793 struct hci_dev *hdev;
794 struct hci_dev_info di;
795 int err = 0;
797 if (copy_from_user(&di, arg, sizeof(di)))
798 return -EFAULT;
800 if (!(hdev = hci_dev_get(di.dev_id)))
801 return -ENODEV;
803 strcpy(di.name, hdev->name);
804 di.bdaddr = hdev->bdaddr;
805 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
806 di.flags = hdev->flags;
807 di.pkt_type = hdev->pkt_type;
808 di.acl_mtu = hdev->acl_mtu;
809 di.acl_pkts = hdev->acl_pkts;
810 di.sco_mtu = hdev->sco_mtu;
811 di.sco_pkts = hdev->sco_pkts;
812 di.link_policy = hdev->link_policy;
813 di.link_mode = hdev->link_mode;
815 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
816 memcpy(&di.features, &hdev->features, sizeof(di.features));
818 if (copy_to_user(arg, &di, sizeof(di)))
819 err = -EFAULT;
821 hci_dev_put(hdev);
823 return err;
826 /* ---- Interface to HCI drivers ---- */
828 static int hci_rfkill_set_block(void *data, bool blocked)
830 struct hci_dev *hdev = data;
832 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
834 if (!blocked)
835 return 0;
837 hci_dev_do_close(hdev);
839 return 0;
842 static const struct rfkill_ops hci_rfkill_ops = {
843 .set_block = hci_rfkill_set_block,
846 /* Alloc HCI device */
847 struct hci_dev *hci_alloc_dev(void)
849 struct hci_dev *hdev;
851 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
852 if (!hdev)
853 return NULL;
855 skb_queue_head_init(&hdev->driver_init);
857 return hdev;
859 EXPORT_SYMBOL(hci_alloc_dev);
861 /* Free HCI device */
862 void hci_free_dev(struct hci_dev *hdev)
864 skb_queue_purge(&hdev->driver_init);
866 /* will free via device release */
867 put_device(&hdev->dev);
869 EXPORT_SYMBOL(hci_free_dev);
871 /* Register HCI device */
872 int hci_register_dev(struct hci_dev *hdev)
874 struct list_head *head = &hci_dev_list, *p;
875 int i, id = 0;
877 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
878 hdev->bus, hdev->owner);
880 if (!hdev->open || !hdev->close || !hdev->destruct)
881 return -EINVAL;
883 write_lock_bh(&hci_dev_list_lock);
885 /* Find first available device id */
886 list_for_each(p, &hci_dev_list) {
887 if (list_entry(p, struct hci_dev, list)->id != id)
888 break;
889 head = p; id++;
892 sprintf(hdev->name, "hci%d", id);
893 hdev->id = id;
894 list_add(&hdev->list, head);
896 atomic_set(&hdev->refcnt, 1);
897 spin_lock_init(&hdev->lock);
899 hdev->flags = 0;
900 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
901 hdev->esco_type = (ESCO_HV1);
902 hdev->link_mode = (HCI_LM_ACCEPT);
904 hdev->idle_timeout = 0;
905 hdev->sniff_max_interval = 800;
906 hdev->sniff_min_interval = 80;
908 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
909 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
910 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
912 skb_queue_head_init(&hdev->rx_q);
913 skb_queue_head_init(&hdev->cmd_q);
914 skb_queue_head_init(&hdev->raw_q);
916 for (i = 0; i < NUM_REASSEMBLY; i++)
917 hdev->reassembly[i] = NULL;
919 init_waitqueue_head(&hdev->req_wait_q);
920 mutex_init(&hdev->req_lock);
922 inquiry_cache_init(hdev);
924 hci_conn_hash_init(hdev);
926 INIT_LIST_HEAD(&hdev->blacklist);
928 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
930 atomic_set(&hdev->promisc, 0);
932 write_unlock_bh(&hci_dev_list_lock);
934 hdev->workqueue = create_singlethread_workqueue(hdev->name);
935 if (!hdev->workqueue)
936 goto nomem;
938 hci_register_sysfs(hdev);
940 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
941 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
942 if (hdev->rfkill) {
943 if (rfkill_register(hdev->rfkill) < 0) {
944 rfkill_destroy(hdev->rfkill);
945 hdev->rfkill = NULL;
949 hci_notify(hdev, HCI_DEV_REG);
951 return id;
953 nomem:
954 write_lock_bh(&hci_dev_list_lock);
955 list_del(&hdev->list);
956 write_unlock_bh(&hci_dev_list_lock);
958 return -ENOMEM;
960 EXPORT_SYMBOL(hci_register_dev);
962 /* Unregister HCI device */
963 int hci_unregister_dev(struct hci_dev *hdev)
965 int i;
967 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
969 write_lock_bh(&hci_dev_list_lock);
970 list_del(&hdev->list);
971 write_unlock_bh(&hci_dev_list_lock);
973 hci_dev_do_close(hdev);
975 for (i = 0; i < NUM_REASSEMBLY; i++)
976 kfree_skb(hdev->reassembly[i]);
978 hci_notify(hdev, HCI_DEV_UNREG);
980 if (hdev->rfkill) {
981 rfkill_unregister(hdev->rfkill);
982 rfkill_destroy(hdev->rfkill);
985 hci_unregister_sysfs(hdev);
987 destroy_workqueue(hdev->workqueue);
989 __hci_dev_put(hdev);
991 return 0;
993 EXPORT_SYMBOL(hci_unregister_dev);
995 /* Suspend HCI device */
996 int hci_suspend_dev(struct hci_dev *hdev)
998 hci_notify(hdev, HCI_DEV_SUSPEND);
999 return 0;
1001 EXPORT_SYMBOL(hci_suspend_dev);
1003 /* Resume HCI device */
1004 int hci_resume_dev(struct hci_dev *hdev)
1006 hci_notify(hdev, HCI_DEV_RESUME);
1007 return 0;
1009 EXPORT_SYMBOL(hci_resume_dev);
1011 /* Receive frame from HCI drivers */
1012 int hci_recv_frame(struct sk_buff *skb)
1014 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1015 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1016 && !test_bit(HCI_INIT, &hdev->flags))) {
1017 kfree_skb(skb);
1018 return -ENXIO;
1021 /* Incomming skb */
1022 bt_cb(skb)->incoming = 1;
1024 /* Time stamp */
1025 __net_timestamp(skb);
1027 /* Queue frame for rx task */
1028 skb_queue_tail(&hdev->rx_q, skb);
1029 tasklet_schedule(&hdev->rx_task);
1031 return 0;
1033 EXPORT_SYMBOL(hci_recv_frame);
1035 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1036 int count, __u8 index, gfp_t gfp_mask)
1038 int len = 0;
1039 int hlen = 0;
1040 int remain = count;
1041 struct sk_buff *skb;
1042 struct bt_skb_cb *scb;
1044 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1045 index >= NUM_REASSEMBLY)
1046 return -EILSEQ;
1048 skb = hdev->reassembly[index];
1050 if (!skb) {
1051 switch (type) {
1052 case HCI_ACLDATA_PKT:
1053 len = HCI_MAX_FRAME_SIZE;
1054 hlen = HCI_ACL_HDR_SIZE;
1055 break;
1056 case HCI_EVENT_PKT:
1057 len = HCI_MAX_EVENT_SIZE;
1058 hlen = HCI_EVENT_HDR_SIZE;
1059 break;
1060 case HCI_SCODATA_PKT:
1061 len = HCI_MAX_SCO_SIZE;
1062 hlen = HCI_SCO_HDR_SIZE;
1063 break;
1066 skb = bt_skb_alloc(len, gfp_mask);
1067 if (!skb)
1068 return -ENOMEM;
1070 scb = (void *) skb->cb;
1071 scb->expect = hlen;
1072 scb->pkt_type = type;
1074 skb->dev = (void *) hdev;
1075 hdev->reassembly[index] = skb;
1078 while (count) {
1079 scb = (void *) skb->cb;
1080 len = min(scb->expect, (__u16)count);
1082 memcpy(skb_put(skb, len), data, len);
1084 count -= len;
1085 data += len;
1086 scb->expect -= len;
1087 remain = count;
1089 switch (type) {
1090 case HCI_EVENT_PKT:
1091 if (skb->len == HCI_EVENT_HDR_SIZE) {
1092 struct hci_event_hdr *h = hci_event_hdr(skb);
1093 scb->expect = h->plen;
1095 if (skb_tailroom(skb) < scb->expect) {
1096 kfree_skb(skb);
1097 hdev->reassembly[index] = NULL;
1098 return -ENOMEM;
1101 break;
1103 case HCI_ACLDATA_PKT:
1104 if (skb->len == HCI_ACL_HDR_SIZE) {
1105 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1106 scb->expect = __le16_to_cpu(h->dlen);
1108 if (skb_tailroom(skb) < scb->expect) {
1109 kfree_skb(skb);
1110 hdev->reassembly[index] = NULL;
1111 return -ENOMEM;
1114 break;
1116 case HCI_SCODATA_PKT:
1117 if (skb->len == HCI_SCO_HDR_SIZE) {
1118 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1119 scb->expect = h->dlen;
1121 if (skb_tailroom(skb) < scb->expect) {
1122 kfree_skb(skb);
1123 hdev->reassembly[index] = NULL;
1124 return -ENOMEM;
1127 break;
1130 if (scb->expect == 0) {
1131 /* Complete frame */
1133 bt_cb(skb)->pkt_type = type;
1134 hci_recv_frame(skb);
1136 hdev->reassembly[index] = NULL;
1137 return remain;
1141 return remain;
1144 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1146 int rem = 0;
1148 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1149 return -EILSEQ;
1151 while (count) {
1152 rem = hci_reassembly(hdev, type, data, count,
1153 type - 1, GFP_ATOMIC);
1154 if (rem < 0)
1155 return rem;
1157 data += (count - rem);
1158 count = rem;
1161 return rem;
1163 EXPORT_SYMBOL(hci_recv_fragment);
1165 #define STREAM_REASSEMBLY 0
1167 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1169 int type;
1170 int rem = 0;
1172 while (count) {
1173 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1175 if (!skb) {
1176 struct { char type; } *pkt;
1178 /* Start of the frame */
1179 pkt = data;
1180 type = pkt->type;
1182 data++;
1183 count--;
1184 } else
1185 type = bt_cb(skb)->pkt_type;
1187 rem = hci_reassembly(hdev, type, data,
1188 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1189 if (rem < 0)
1190 return rem;
1192 data += (count - rem);
1193 count = rem;
1196 return rem;
1198 EXPORT_SYMBOL(hci_recv_stream_fragment);
1200 /* ---- Interface to upper protocols ---- */
1202 /* Register/Unregister protocols.
1203 * hci_task_lock is used to ensure that no tasks are running. */
1204 int hci_register_proto(struct hci_proto *hp)
1206 int err = 0;
1208 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1210 if (hp->id >= HCI_MAX_PROTO)
1211 return -EINVAL;
1213 write_lock_bh(&hci_task_lock);
1215 if (!hci_proto[hp->id])
1216 hci_proto[hp->id] = hp;
1217 else
1218 err = -EEXIST;
1220 write_unlock_bh(&hci_task_lock);
1222 return err;
1224 EXPORT_SYMBOL(hci_register_proto);
1226 int hci_unregister_proto(struct hci_proto *hp)
1228 int err = 0;
1230 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1232 if (hp->id >= HCI_MAX_PROTO)
1233 return -EINVAL;
1235 write_lock_bh(&hci_task_lock);
1237 if (hci_proto[hp->id])
1238 hci_proto[hp->id] = NULL;
1239 else
1240 err = -ENOENT;
1242 write_unlock_bh(&hci_task_lock);
1244 return err;
1246 EXPORT_SYMBOL(hci_unregister_proto);
1248 int hci_register_cb(struct hci_cb *cb)
1250 BT_DBG("%p name %s", cb, cb->name);
1252 write_lock_bh(&hci_cb_list_lock);
1253 list_add(&cb->list, &hci_cb_list);
1254 write_unlock_bh(&hci_cb_list_lock);
1256 return 0;
1258 EXPORT_SYMBOL(hci_register_cb);
1260 int hci_unregister_cb(struct hci_cb *cb)
1262 BT_DBG("%p name %s", cb, cb->name);
1264 write_lock_bh(&hci_cb_list_lock);
1265 list_del(&cb->list);
1266 write_unlock_bh(&hci_cb_list_lock);
1268 return 0;
1270 EXPORT_SYMBOL(hci_unregister_cb);
1272 static int hci_send_frame(struct sk_buff *skb)
1274 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1276 if (!hdev) {
1277 kfree_skb(skb);
1278 return -ENODEV;
1281 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1283 if (atomic_read(&hdev->promisc)) {
1284 /* Time stamp */
1285 __net_timestamp(skb);
1287 hci_send_to_sock(hdev, skb);
1290 /* Get rid of skb owner, prior to sending to the driver. */
1291 skb_orphan(skb);
1293 return hdev->send(skb);
1296 /* Send HCI command */
1297 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1299 int len = HCI_COMMAND_HDR_SIZE + plen;
1300 struct hci_command_hdr *hdr;
1301 struct sk_buff *skb;
1303 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1305 skb = bt_skb_alloc(len, GFP_ATOMIC);
1306 if (!skb) {
1307 BT_ERR("%s no memory for command", hdev->name);
1308 return -ENOMEM;
1311 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1312 hdr->opcode = cpu_to_le16(opcode);
1313 hdr->plen = plen;
1315 if (plen)
1316 memcpy(skb_put(skb, plen), param, plen);
1318 BT_DBG("skb len %d", skb->len);
1320 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1321 skb->dev = (void *) hdev;
1323 skb_queue_tail(&hdev->cmd_q, skb);
1324 tasklet_schedule(&hdev->cmd_task);
1326 return 0;
1329 /* Get data from the previously sent command */
1330 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1332 struct hci_command_hdr *hdr;
1334 if (!hdev->sent_cmd)
1335 return NULL;
1337 hdr = (void *) hdev->sent_cmd->data;
1339 if (hdr->opcode != cpu_to_le16(opcode))
1340 return NULL;
1342 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1344 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1347 /* Send ACL data */
1348 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1350 struct hci_acl_hdr *hdr;
1351 int len = skb->len;
1353 skb_push(skb, HCI_ACL_HDR_SIZE);
1354 skb_reset_transport_header(skb);
1355 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1356 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1357 hdr->dlen = cpu_to_le16(len);
1360 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1362 struct hci_dev *hdev = conn->hdev;
1363 struct sk_buff *list;
1365 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1367 skb->dev = (void *) hdev;
1368 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1369 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1371 if (!(list = skb_shinfo(skb)->frag_list)) {
1372 /* Non fragmented */
1373 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1375 skb_queue_tail(&conn->data_q, skb);
1376 } else {
1377 /* Fragmented */
1378 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1380 skb_shinfo(skb)->frag_list = NULL;
1382 /* Queue all fragments atomically */
1383 spin_lock_bh(&conn->data_q.lock);
1385 __skb_queue_tail(&conn->data_q, skb);
1386 do {
1387 skb = list; list = list->next;
1389 skb->dev = (void *) hdev;
1390 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1391 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1393 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1395 __skb_queue_tail(&conn->data_q, skb);
1396 } while (list);
1398 spin_unlock_bh(&conn->data_q.lock);
1401 tasklet_schedule(&hdev->tx_task);
1403 EXPORT_SYMBOL(hci_send_acl);
1405 /* Send SCO data */
1406 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1408 struct hci_dev *hdev = conn->hdev;
1409 struct hci_sco_hdr hdr;
1411 BT_DBG("%s len %d", hdev->name, skb->len);
1413 hdr.handle = cpu_to_le16(conn->handle);
1414 hdr.dlen = skb->len;
1416 skb_push(skb, HCI_SCO_HDR_SIZE);
1417 skb_reset_transport_header(skb);
1418 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1420 skb->dev = (void *) hdev;
1421 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1423 skb_queue_tail(&conn->data_q, skb);
1424 tasklet_schedule(&hdev->tx_task);
1426 EXPORT_SYMBOL(hci_send_sco);
1428 /* ---- HCI TX task (outgoing data) ---- */
1430 /* HCI Connection scheduler */
1431 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1433 struct hci_conn_hash *h = &hdev->conn_hash;
1434 struct hci_conn *conn = NULL;
1435 int num = 0, min = ~0;
1436 struct list_head *p;
1438 /* We don't have to lock device here. Connections are always
1439 * added and removed with TX task disabled. */
1440 list_for_each(p, &h->list) {
1441 struct hci_conn *c;
1442 c = list_entry(p, struct hci_conn, list);
1444 if (c->type != type || skb_queue_empty(&c->data_q))
1445 continue;
1447 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1448 continue;
1450 num++;
1452 if (c->sent < min) {
1453 min = c->sent;
1454 conn = c;
1458 if (conn) {
1459 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1460 int q = cnt / num;
1461 *quote = q ? q : 1;
1462 } else
1463 *quote = 0;
1465 BT_DBG("conn %p quote %d", conn, *quote);
1466 return conn;
1469 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1471 struct hci_conn_hash *h = &hdev->conn_hash;
1472 struct list_head *p;
1473 struct hci_conn *c;
1475 BT_ERR("%s ACL tx timeout", hdev->name);
1477 /* Kill stalled connections */
1478 list_for_each(p, &h->list) {
1479 c = list_entry(p, struct hci_conn, list);
1480 if (c->type == ACL_LINK && c->sent) {
1481 BT_ERR("%s killing stalled ACL connection %s",
1482 hdev->name, batostr(&c->dst));
1483 hci_acl_disconn(c, 0x13);
1488 static inline void hci_sched_acl(struct hci_dev *hdev)
1490 struct hci_conn *conn;
1491 struct sk_buff *skb;
1492 int quote;
1494 BT_DBG("%s", hdev->name);
1496 if (!test_bit(HCI_RAW, &hdev->flags)) {
1497 /* ACL tx timeout must be longer than maximum
1498 * link supervision timeout (40.9 seconds) */
1499 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1500 hci_acl_tx_to(hdev);
1503 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1504 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1505 BT_DBG("skb %p len %d", skb, skb->len);
1507 hci_conn_enter_active_mode(conn);
1509 hci_send_frame(skb);
1510 hdev->acl_last_tx = jiffies;
1512 hdev->acl_cnt--;
1513 conn->sent++;
1518 /* Schedule SCO */
1519 static inline void hci_sched_sco(struct hci_dev *hdev)
1521 struct hci_conn *conn;
1522 struct sk_buff *skb;
1523 int quote;
1525 BT_DBG("%s", hdev->name);
1527 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1528 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1529 BT_DBG("skb %p len %d", skb, skb->len);
1530 hci_send_frame(skb);
1532 conn->sent++;
1533 if (conn->sent == ~0)
1534 conn->sent = 0;
1539 static inline void hci_sched_esco(struct hci_dev *hdev)
1541 struct hci_conn *conn;
1542 struct sk_buff *skb;
1543 int quote;
1545 BT_DBG("%s", hdev->name);
1547 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1548 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1549 BT_DBG("skb %p len %d", skb, skb->len);
1550 hci_send_frame(skb);
1552 conn->sent++;
1553 if (conn->sent == ~0)
1554 conn->sent = 0;
1559 static void hci_tx_task(unsigned long arg)
1561 struct hci_dev *hdev = (struct hci_dev *) arg;
1562 struct sk_buff *skb;
1564 read_lock(&hci_task_lock);
1566 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1568 /* Schedule queues and send stuff to HCI driver */
1570 hci_sched_acl(hdev);
1572 hci_sched_sco(hdev);
1574 hci_sched_esco(hdev);
1576 /* Send next queued raw (unknown type) packet */
1577 while ((skb = skb_dequeue(&hdev->raw_q)))
1578 hci_send_frame(skb);
1580 read_unlock(&hci_task_lock);
1583 /* ----- HCI RX task (incoming data proccessing) ----- */
1585 /* ACL data packet */
1586 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1588 struct hci_acl_hdr *hdr = (void *) skb->data;
1589 struct hci_conn *conn;
1590 __u16 handle, flags;
1592 skb_pull(skb, HCI_ACL_HDR_SIZE);
1594 handle = __le16_to_cpu(hdr->handle);
1595 flags = hci_flags(handle);
1596 handle = hci_handle(handle);
1598 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1600 hdev->stat.acl_rx++;
1602 hci_dev_lock(hdev);
1603 conn = hci_conn_hash_lookup_handle(hdev, handle);
1604 hci_dev_unlock(hdev);
1606 if (conn) {
1607 register struct hci_proto *hp;
1609 hci_conn_enter_active_mode(conn);
1611 /* Send to upper protocol */
1612 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1613 hp->recv_acldata(conn, skb, flags);
1614 return;
1616 } else {
1617 BT_ERR("%s ACL packet for unknown connection handle %d",
1618 hdev->name, handle);
1621 kfree_skb(skb);
1624 /* SCO data packet */
1625 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1627 struct hci_sco_hdr *hdr = (void *) skb->data;
1628 struct hci_conn *conn;
1629 __u16 handle;
1631 skb_pull(skb, HCI_SCO_HDR_SIZE);
1633 handle = __le16_to_cpu(hdr->handle);
1635 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1637 hdev->stat.sco_rx++;
1639 hci_dev_lock(hdev);
1640 conn = hci_conn_hash_lookup_handle(hdev, handle);
1641 hci_dev_unlock(hdev);
1643 if (conn) {
1644 register struct hci_proto *hp;
1646 /* Send to upper protocol */
1647 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1648 hp->recv_scodata(conn, skb);
1649 return;
1651 } else {
1652 BT_ERR("%s SCO packet for unknown connection handle %d",
1653 hdev->name, handle);
1656 kfree_skb(skb);
1659 static void hci_rx_task(unsigned long arg)
1661 struct hci_dev *hdev = (struct hci_dev *) arg;
1662 struct sk_buff *skb;
1664 BT_DBG("%s", hdev->name);
1666 read_lock(&hci_task_lock);
1668 while ((skb = skb_dequeue(&hdev->rx_q))) {
1669 if (atomic_read(&hdev->promisc)) {
1670 /* Send copy to the sockets */
1671 hci_send_to_sock(hdev, skb);
1674 if (test_bit(HCI_RAW, &hdev->flags)) {
1675 kfree_skb(skb);
1676 continue;
1679 if (test_bit(HCI_INIT, &hdev->flags)) {
1680 /* Don't process data packets in this states. */
1681 switch (bt_cb(skb)->pkt_type) {
1682 case HCI_ACLDATA_PKT:
1683 case HCI_SCODATA_PKT:
1684 kfree_skb(skb);
1685 continue;
1689 /* Process frame */
1690 switch (bt_cb(skb)->pkt_type) {
1691 case HCI_EVENT_PKT:
1692 hci_event_packet(hdev, skb);
1693 break;
1695 case HCI_ACLDATA_PKT:
1696 BT_DBG("%s ACL data packet", hdev->name);
1697 hci_acldata_packet(hdev, skb);
1698 break;
1700 case HCI_SCODATA_PKT:
1701 BT_DBG("%s SCO data packet", hdev->name);
1702 hci_scodata_packet(hdev, skb);
1703 break;
1705 default:
1706 kfree_skb(skb);
1707 break;
1711 read_unlock(&hci_task_lock);
1714 static void hci_cmd_task(unsigned long arg)
1716 struct hci_dev *hdev = (struct hci_dev *) arg;
1717 struct sk_buff *skb;
1719 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1721 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1722 BT_ERR("%s command tx timeout", hdev->name);
1723 atomic_set(&hdev->cmd_cnt, 1);
1726 /* Send queued commands */
1727 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1728 kfree_skb(hdev->sent_cmd);
1730 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1731 atomic_dec(&hdev->cmd_cnt);
1732 hci_send_frame(skb);
1733 hdev->cmd_last_tx = jiffies;
1734 } else {
1735 skb_queue_head(&hdev->cmd_q, skb);
1736 tasklet_schedule(&hdev->cmd_task);