net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
55 #define AUTO_OFF_TIMEOUT 2000
57 static void hci_cmd_task(unsigned long arg);
58 static void hci_rx_task(unsigned long arg);
59 static void hci_tx_task(unsigned long arg);
61 static DEFINE_RWLOCK(hci_task_lock);
63 static int enable_smp;
65 /* HCI device list */
66 LIST_HEAD(hci_dev_list);
67 DEFINE_RWLOCK(hci_dev_list_lock);
69 /* HCI callback list */
70 LIST_HEAD(hci_cb_list);
71 DEFINE_RWLOCK(hci_cb_list_lock);
73 /* HCI protocols */
74 #define HCI_MAX_PROTO 2
75 struct hci_proto *hci_proto[HCI_MAX_PROTO];
77 /* HCI notifiers list */
78 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
80 /* ---- HCI notifications ---- */
82 int hci_register_notifier(struct notifier_block *nb)
84 return atomic_notifier_chain_register(&hci_notifier, nb);
87 int hci_unregister_notifier(struct notifier_block *nb)
89 return atomic_notifier_chain_unregister(&hci_notifier, nb);
92 static void hci_notify(struct hci_dev *hdev, int event)
94 atomic_notifier_call_chain(&hci_notifier, event, hdev);
97 /* ---- HCI requests ---- */
99 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
101 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
103 /* If this is the init phase check if the completed command matches
104 * the last init command, and if not just return.
106 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
107 return;
109 if (hdev->req_status == HCI_REQ_PEND) {
110 hdev->req_result = result;
111 hdev->req_status = HCI_REQ_DONE;
112 wake_up_interruptible(&hdev->req_wait_q);
116 static void hci_req_cancel(struct hci_dev *hdev, int err)
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
127 /* Execute request and wait for completion. */
128 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
129 unsigned long opt, __u32 timeout)
131 DECLARE_WAITQUEUE(wait, current);
132 int err = 0;
134 BT_DBG("%s start", hdev->name);
136 hdev->req_status = HCI_REQ_PEND;
138 add_wait_queue(&hdev->req_wait_q, &wait);
139 set_current_state(TASK_INTERRUPTIBLE);
141 req(hdev, opt);
142 schedule_timeout(timeout);
144 remove_wait_queue(&hdev->req_wait_q, &wait);
146 if (signal_pending(current))
147 return -EINTR;
149 switch (hdev->req_status) {
150 case HCI_REQ_DONE:
151 err = -bt_err(hdev->req_result);
152 break;
154 case HCI_REQ_CANCELED:
155 err = -hdev->req_result;
156 break;
158 default:
159 err = -ETIMEDOUT;
160 break;
163 hdev->req_status = hdev->req_result = 0;
165 BT_DBG("%s end: err %d", hdev->name, err);
167 return err;
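/* How the synchronous request machinery above works: __hci_request() marks
 * the request pending, lets the req() callback queue one or more HCI
 * commands, then sleeps interruptibly on req_wait_q for up to 'timeout'
 * jiffies.  hci_req_complete() (on command completion) or hci_req_cancel()
 * record the result and wake the waiter; the caller gets -EINTR on a
 * signal, -ETIMEDOUT when nothing answered, or the controller status mapped
 * to a negative errno.  hci_request() below is the wrapper that serializes
 * requests with hci_req_lock() and rejects them while the device is down. */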
170 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
171 unsigned long opt, __u32 timeout)
173 int ret;
175 if (!test_bit(HCI_UP, &hdev->flags))
176 return -ENETDOWN;
178 /* Serialize all requests */
179 hci_req_lock(hdev);
180 ret = __hci_request(hdev, req, opt, timeout);
181 hci_req_unlock(hdev);
183 return ret;
186 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
188 BT_DBG("%s %ld", hdev->name, opt);
190 /* Reset device */
191 set_bit(HCI_RESET, &hdev->flags);
192 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
195 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
197 struct hci_cp_delete_stored_link_key cp;
198 struct sk_buff *skb;
199 __le16 param;
200 __u8 flt_type;
202 BT_DBG("%s %ld", hdev->name, opt);
204 /* Driver initialization */
206 /* Special commands */
207 while ((skb = skb_dequeue(&hdev->driver_init))) {
208 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
209 skb->dev = (void *) hdev;
211 skb_queue_tail(&hdev->cmd_q, skb);
212 tasklet_schedule(&hdev->cmd_task);
214 skb_queue_purge(&hdev->driver_init);
216 /* Mandatory initialization */
218 /* Reset */
219 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
220 set_bit(HCI_RESET, &hdev->flags);
221 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
224 /* Read Local Supported Features */
225 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
227 /* Read Local Version */
228 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
230 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
231 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
233 #if 0
234 /* Host buffer size */
236 struct hci_cp_host_buffer_size cp;
237 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
238 cp.sco_mtu = HCI_MAX_SCO_SIZE;
239 cp.acl_max_pkt = cpu_to_le16(0xffff);
240 cp.sco_max_pkt = cpu_to_le16(0xffff);
241 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
243 #endif
245 /* Read BD Address */
246 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
248 /* Read Class of Device */
249 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
251 /* Read Local Name */
252 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
254 /* Read Voice Setting */
255 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
257 /* Optional initialization */
259 /* Clear Event Filters */
260 flt_type = HCI_FLT_CLEAR_ALL;
261 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
263 /* Connection accept timeout ~20 secs */
264 param = cpu_to_le16(0x7d00);
265 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
267 bacpy(&cp.bdaddr, BDADDR_ANY);
268 cp.delete_all = 1;
269 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
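/* The init request above first flushes any driver-provided setup commands
 * from hdev->driver_init, then issues the mandatory sequence: Reset (unless
 * HCI_QUIRK_NO_RESET), Read Local Supported Features, Read Local Version,
 * Read Buffer Size, Read BD Address, Read Class of Device, Read Local Name
 * and Read Voice Setting, followed by the optional steps of clearing the
 * event filter, setting the connection accept timeout to ~20 s (0x7d00) and
 * deleting all stored link keys.  Completion of the whole batch is tracked
 * through init_last_cmd and hci_req_complete(). */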
272 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
274 BT_DBG("%s", hdev->name);
276 /* Read LE buffer size */
277 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
280 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
282 __u8 scan = opt;
284 BT_DBG("%s %x", hdev->name, scan);
286 /* Inquiry and Page scans */
287 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
290 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
292 __u8 auth = opt;
294 BT_DBG("%s %x", hdev->name, auth);
296 /* Authentication */
297 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
300 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
302 __u8 encrypt = opt;
304 BT_DBG("%s %x", hdev->name, encrypt);
306 /* Encryption */
307 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
310 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
312 __le16 policy = cpu_to_le16(opt);
314 BT_DBG("%s %x", hdev->name, policy);
316 /* Default link policy */
317 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
320 /* Get HCI device by index.
321 * Device is held on return. */
322 struct hci_dev *hci_dev_get(int index)
324 struct hci_dev *hdev = NULL;
325 struct list_head *p;
327 BT_DBG("%d", index);
329 if (index < 0)
330 return NULL;
332 read_lock(&hci_dev_list_lock);
333 list_for_each(p, &hci_dev_list) {
334 struct hci_dev *d = list_entry(p, struct hci_dev, list);
335 if (d->id == index) {
336 hdev = hci_dev_hold(d);
337 break;
340 read_unlock(&hci_dev_list_lock);
341 return hdev;
344 /* ---- Inquiry support ---- */
345 static void inquiry_cache_flush(struct hci_dev *hdev)
347 struct inquiry_cache *cache = &hdev->inq_cache;
348 struct inquiry_entry *next = cache->list, *e;
350 BT_DBG("cache %p", cache);
352 cache->list = NULL;
353 while ((e = next)) {
354 next = e->next;
355 kfree(e);
359 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
361 struct inquiry_cache *cache = &hdev->inq_cache;
362 struct inquiry_entry *e;
364 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
366 for (e = cache->list; e; e = e->next)
367 if (!bacmp(&e->data.bdaddr, bdaddr))
368 break;
369 return e;
372 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
374 struct inquiry_cache *cache = &hdev->inq_cache;
375 struct inquiry_entry *ie;
377 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
379 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
380 if (!ie) {
381 /* Entry not in the cache. Add new one. */
382 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
383 if (!ie)
384 return;
386 ie->next = cache->list;
387 cache->list = ie;
390 memcpy(&ie->data, data, sizeof(*data));
391 ie->timestamp = jiffies;
392 cache->timestamp = jiffies;
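/* The inquiry cache is a simple singly linked list hanging off
 * hdev->inq_cache: lookups walk the list comparing bdaddrs, updates either
 * refresh an existing entry in place or prepend a new one, and both the
 * entry and the cache timestamps are bumped so hci_inquiry() can tell how
 * stale the cached results are. */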
395 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
397 struct inquiry_cache *cache = &hdev->inq_cache;
398 struct inquiry_info *info = (struct inquiry_info *) buf;
399 struct inquiry_entry *e;
400 int copied = 0;
402 for (e = cache->list; e && copied < num; e = e->next, copied++) {
403 struct inquiry_data *data = &e->data;
404 bacpy(&info->bdaddr, &data->bdaddr);
405 info->pscan_rep_mode = data->pscan_rep_mode;
406 info->pscan_period_mode = data->pscan_period_mode;
407 info->pscan_mode = data->pscan_mode;
408 memcpy(info->dev_class, data->dev_class, 3);
409 info->clock_offset = data->clock_offset;
410 info++;
413 BT_DBG("cache %p, copied %d", cache, copied);
414 return copied;
417 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
419 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
420 struct hci_cp_inquiry cp;
422 BT_DBG("%s", hdev->name);
424 if (test_bit(HCI_INQUIRY, &hdev->flags))
425 return;
427 /* Start Inquiry */
428 memcpy(&cp.lap, &ir->lap, 3);
429 cp.length = ir->length;
430 cp.num_rsp = ir->num_rsp;
431 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
434 int hci_inquiry(void __user *arg)
436 __u8 __user *ptr = arg;
437 struct hci_inquiry_req ir;
438 struct hci_dev *hdev;
439 int err = 0, do_inquiry = 0, max_rsp;
440 long timeo;
441 __u8 *buf;
443 if (copy_from_user(&ir, ptr, sizeof(ir)))
444 return -EFAULT;
446 hdev = hci_dev_get(ir.dev_id);
447 if (!hdev)
448 return -ENODEV;
450 hci_dev_lock_bh(hdev);
451 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
452 inquiry_cache_empty(hdev) ||
453 ir.flags & IREQ_CACHE_FLUSH) {
454 inquiry_cache_flush(hdev);
455 do_inquiry = 1;
457 hci_dev_unlock_bh(hdev);
459 timeo = ir.length * msecs_to_jiffies(2000);
461 if (do_inquiry) {
462 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
463 if (err < 0)
464 goto done;
467 /* for an unlimited number of responses, use a buffer with 255 entries */
468 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
470 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
471 * copy it to the user space.
473 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
474 if (!buf) {
475 err = -ENOMEM;
476 goto done;
479 hci_dev_lock_bh(hdev);
480 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
481 hci_dev_unlock_bh(hdev);
483 BT_DBG("num_rsp %d", ir.num_rsp);
485 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
486 ptr += sizeof(ir);
487 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
488 ir.num_rsp))
489 err = -EFAULT;
490 } else
491 err = -EFAULT;
493 kfree(buf);
495 done:
496 hci_dev_put(hdev);
497 return err;
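/* hci_inquiry() implements the inquiry ioctl: it copies the request from
 * user space, flushes the cache if it is too old, empty or a flush was
 * requested, runs the inquiry through hci_request() with a budget of two
 * seconds per requested inquiry-length unit, then dumps the cache into a
 * temporary kernel buffer (up to 255 entries when num_rsp is 0) and copies
 * the responses back to user space. */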
500 /* ---- HCI ioctl helpers ---- */
502 int hci_dev_open(__u16 dev)
504 struct hci_dev *hdev;
505 int ret = 0;
507 hdev = hci_dev_get(dev);
508 if (!hdev)
509 return -ENODEV;
511 BT_DBG("%s %p", hdev->name, hdev);
513 hci_req_lock(hdev);
515 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
516 ret = -ERFKILL;
517 goto done;
520 if (test_bit(HCI_UP, &hdev->flags)) {
521 ret = -EALREADY;
522 goto done;
525 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
526 set_bit(HCI_RAW, &hdev->flags);
528 /* Treat all non BR/EDR controllers as raw devices for now */
529 if (hdev->dev_type != HCI_BREDR)
530 set_bit(HCI_RAW, &hdev->flags);
532 if (hdev->open(hdev)) {
533 ret = -EIO;
534 goto done;
537 if (!test_bit(HCI_RAW, &hdev->flags)) {
538 atomic_set(&hdev->cmd_cnt, 1);
539 set_bit(HCI_INIT, &hdev->flags);
540 hdev->init_last_cmd = 0;
542 ret = __hci_request(hdev, hci_init_req, 0,
543 msecs_to_jiffies(HCI_INIT_TIMEOUT));
545 if (lmp_le_capable(hdev))
546 ret = __hci_request(hdev, hci_le_init_req, 0,
547 msecs_to_jiffies(HCI_INIT_TIMEOUT));
549 clear_bit(HCI_INIT, &hdev->flags);
552 if (!ret) {
553 hci_dev_hold(hdev);
554 set_bit(HCI_UP, &hdev->flags);
555 hci_notify(hdev, HCI_DEV_UP);
556 if (!test_bit(HCI_SETUP, &hdev->flags))
557 mgmt_powered(hdev->id, 1);
558 } else {
559 /* Init failed, cleanup */
560 tasklet_kill(&hdev->rx_task);
561 tasklet_kill(&hdev->tx_task);
562 tasklet_kill(&hdev->cmd_task);
564 skb_queue_purge(&hdev->cmd_q);
565 skb_queue_purge(&hdev->rx_q);
567 if (hdev->flush)
568 hdev->flush(hdev);
570 if (hdev->sent_cmd) {
571 kfree_skb(hdev->sent_cmd);
572 hdev->sent_cmd = NULL;
575 hdev->close(hdev);
576 hdev->flags = 0;
579 done:
580 hci_req_unlock(hdev);
581 hci_dev_put(hdev);
582 return ret;
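/* Open sequence: refuse if the device is rfkill-blocked or already up, mark
 * raw-only devices (HCI_QUIRK_RAW_DEVICE and, for now, anything that is not
 * BR/EDR), call the driver's open(), and unless the device is raw run the
 * init request (plus the LE init request on LE-capable controllers) under
 * the HCI_INIT flag with HCI_INIT_TIMEOUT.  On success the device is held,
 * HCI_UP is set and HCI_DEV_UP / mgmt_powered() are signalled; on failure
 * the tasklets are killed, the queues purged and the driver closed again. */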
585 static int hci_dev_do_close(struct hci_dev *hdev)
587 BT_DBG("%s %p", hdev->name, hdev);
589 hci_req_cancel(hdev, ENODEV);
590 hci_req_lock(hdev);
592 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
593 del_timer_sync(&hdev->cmd_timer);
594 hci_req_unlock(hdev);
595 return 0;
598 /* Kill RX and TX tasks */
599 tasklet_kill(&hdev->rx_task);
600 tasklet_kill(&hdev->tx_task);
602 hci_dev_lock_bh(hdev);
603 inquiry_cache_flush(hdev);
604 hci_conn_hash_flush(hdev);
605 hci_dev_unlock_bh(hdev);
607 hci_notify(hdev, HCI_DEV_DOWN);
609 if (hdev->flush)
610 hdev->flush(hdev);
612 /* Reset device */
613 skb_queue_purge(&hdev->cmd_q);
614 atomic_set(&hdev->cmd_cnt, 1);
615 if (!test_bit(HCI_RAW, &hdev->flags)) {
616 set_bit(HCI_INIT, &hdev->flags);
617 __hci_request(hdev, hci_reset_req, 0,
618 msecs_to_jiffies(250));
619 clear_bit(HCI_INIT, &hdev->flags);
622 /* Kill cmd task */
623 tasklet_kill(&hdev->cmd_task);
625 /* Drop queues */
626 skb_queue_purge(&hdev->rx_q);
627 skb_queue_purge(&hdev->cmd_q);
628 skb_queue_purge(&hdev->raw_q);
630 /* Drop last sent command */
631 if (hdev->sent_cmd) {
632 del_timer_sync(&hdev->cmd_timer);
633 kfree_skb(hdev->sent_cmd);
634 hdev->sent_cmd = NULL;
637 /* After this point our queues are empty
638 * and no tasks are scheduled. */
639 hdev->close(hdev);
641 mgmt_powered(hdev->id, 0);
643 /* Clear flags */
644 hdev->flags = 0;
646 hci_req_unlock(hdev);
648 hci_dev_put(hdev);
649 return 0;
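/* Close sequence: cancel any pending request, and if the device was not up
 * just stop the command timer.  Otherwise kill the RX/TX tasklets, flush
 * the inquiry cache and connection hash, notify HCI_DEV_DOWN, let the
 * driver flush, send a final HCI reset (250 ms budget) unless the device is
 * raw, kill the command tasklet, purge all queues, drop the last sent
 * command, close the driver, report power-off to mgmt and clear the flags
 * before dropping the reference taken at open time. */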
652 int hci_dev_close(__u16 dev)
654 struct hci_dev *hdev;
655 int err;
657 hdev = hci_dev_get(dev);
658 if (!hdev)
659 return -ENODEV;
660 err = hci_dev_do_close(hdev);
661 hci_dev_put(hdev);
662 return err;
665 int hci_dev_reset(__u16 dev)
667 struct hci_dev *hdev;
668 int ret = 0;
670 hdev = hci_dev_get(dev);
671 if (!hdev)
672 return -ENODEV;
674 hci_req_lock(hdev);
675 tasklet_disable(&hdev->tx_task);
677 if (!test_bit(HCI_UP, &hdev->flags))
678 goto done;
680 /* Drop queues */
681 skb_queue_purge(&hdev->rx_q);
682 skb_queue_purge(&hdev->cmd_q);
684 hci_dev_lock_bh(hdev);
685 inquiry_cache_flush(hdev);
686 hci_conn_hash_flush(hdev);
687 hci_dev_unlock_bh(hdev);
689 if (hdev->flush)
690 hdev->flush(hdev);
692 atomic_set(&hdev->cmd_cnt, 1);
693 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
695 if (!test_bit(HCI_RAW, &hdev->flags))
696 ret = __hci_request(hdev, hci_reset_req, 0,
697 msecs_to_jiffies(HCI_INIT_TIMEOUT));
699 done:
700 tasklet_enable(&hdev->tx_task);
701 hci_req_unlock(hdev);
702 hci_dev_put(hdev);
703 return ret;
706 int hci_dev_reset_stat(__u16 dev)
708 struct hci_dev *hdev;
709 int ret = 0;
711 hdev = hci_dev_get(dev);
712 if (!hdev)
713 return -ENODEV;
715 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
717 hci_dev_put(hdev);
719 return ret;
722 int hci_dev_cmd(unsigned int cmd, void __user *arg)
724 struct hci_dev *hdev;
725 struct hci_dev_req dr;
726 int err = 0;
728 if (copy_from_user(&dr, arg, sizeof(dr)))
729 return -EFAULT;
731 hdev = hci_dev_get(dr.dev_id);
732 if (!hdev)
733 return -ENODEV;
735 switch (cmd) {
736 case HCISETAUTH:
737 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
738 msecs_to_jiffies(HCI_INIT_TIMEOUT));
739 break;
741 case HCISETENCRYPT:
742 if (!lmp_encrypt_capable(hdev)) {
743 err = -EOPNOTSUPP;
744 break;
747 if (!test_bit(HCI_AUTH, &hdev->flags)) {
748 /* Auth must be enabled first */
749 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
750 msecs_to_jiffies(HCI_INIT_TIMEOUT));
751 if (err)
752 break;
755 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
756 msecs_to_jiffies(HCI_INIT_TIMEOUT));
757 break;
759 case HCISETSCAN:
760 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
761 msecs_to_jiffies(HCI_INIT_TIMEOUT));
762 break;
764 case HCISETLINKPOL:
765 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
766 msecs_to_jiffies(HCI_INIT_TIMEOUT));
767 break;
769 case HCISETLINKMODE:
770 hdev->link_mode = ((__u16) dr.dev_opt) &
771 (HCI_LM_MASTER | HCI_LM_ACCEPT);
772 break;
774 case HCISETPTYPE:
775 hdev->pkt_type = (__u16) dr.dev_opt;
776 break;
778 case HCISETACLMTU:
779 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
780 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
781 break;
783 case HCISETSCOMTU:
784 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
785 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
786 break;
788 default:
789 err = -EINVAL;
790 break;
793 hci_dev_put(hdev);
794 return err;
797 int hci_get_dev_list(void __user *arg)
799 struct hci_dev_list_req *dl;
800 struct hci_dev_req *dr;
801 struct list_head *p;
802 int n = 0, size, err;
803 __u16 dev_num;
805 if (get_user(dev_num, (__u16 __user *) arg))
806 return -EFAULT;
808 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
809 return -EINVAL;
811 size = sizeof(*dl) + dev_num * sizeof(*dr);
813 dl = kzalloc(size, GFP_KERNEL);
814 if (!dl)
815 return -ENOMEM;
817 dr = dl->dev_req;
819 read_lock_bh(&hci_dev_list_lock);
820 list_for_each(p, &hci_dev_list) {
821 struct hci_dev *hdev;
823 hdev = list_entry(p, struct hci_dev, list);
825 hci_del_off_timer(hdev);
827 if (!test_bit(HCI_MGMT, &hdev->flags))
828 set_bit(HCI_PAIRABLE, &hdev->flags);
830 (dr + n)->dev_id = hdev->id;
831 (dr + n)->dev_opt = hdev->flags;
833 if (++n >= dev_num)
834 break;
836 read_unlock_bh(&hci_dev_list_lock);
838 dl->dev_num = n;
839 size = sizeof(*dl) + n * sizeof(*dr);
841 err = copy_to_user(arg, dl, size);
842 kfree(dl);
844 return err ? -EFAULT : 0;
847 int hci_get_dev_info(void __user *arg)
849 struct hci_dev *hdev;
850 struct hci_dev_info di;
851 int err = 0;
853 if (copy_from_user(&di, arg, sizeof(di)))
854 return -EFAULT;
856 hdev = hci_dev_get(di.dev_id);
857 if (!hdev)
858 return -ENODEV;
860 hci_del_off_timer(hdev);
862 if (!test_bit(HCI_MGMT, &hdev->flags))
863 set_bit(HCI_PAIRABLE, &hdev->flags);
865 strcpy(di.name, hdev->name);
866 di.bdaddr = hdev->bdaddr;
867 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
868 di.flags = hdev->flags;
869 di.pkt_type = hdev->pkt_type;
870 di.acl_mtu = hdev->acl_mtu;
871 di.acl_pkts = hdev->acl_pkts;
872 di.sco_mtu = hdev->sco_mtu;
873 di.sco_pkts = hdev->sco_pkts;
874 di.link_policy = hdev->link_policy;
875 di.link_mode = hdev->link_mode;
877 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
878 memcpy(&di.features, &hdev->features, sizeof(di.features));
880 if (copy_to_user(arg, &di, sizeof(di)))
881 err = -EFAULT;
883 hci_dev_put(hdev);
885 return err;
888 /* ---- Interface to HCI drivers ---- */
890 static int hci_rfkill_set_block(void *data, bool blocked)
892 struct hci_dev *hdev = data;
894 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
896 if (!blocked)
897 return 0;
899 hci_dev_do_close(hdev);
901 return 0;
904 static const struct rfkill_ops hci_rfkill_ops = {
905 .set_block = hci_rfkill_set_block,
908 /* Alloc HCI device */
909 struct hci_dev *hci_alloc_dev(void)
911 struct hci_dev *hdev;
913 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
914 if (!hdev)
915 return NULL;
917 skb_queue_head_init(&hdev->driver_init);
919 return hdev;
921 EXPORT_SYMBOL(hci_alloc_dev);
923 /* Free HCI device */
924 void hci_free_dev(struct hci_dev *hdev)
926 skb_queue_purge(&hdev->driver_init);
928 /* will free via device release */
929 put_device(&hdev->dev);
931 EXPORT_SYMBOL(hci_free_dev);
933 static void hci_power_on(struct work_struct *work)
935 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
937 BT_DBG("%s", hdev->name);
939 if (hci_dev_open(hdev->id) < 0)
940 return;
942 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
943 mod_timer(&hdev->off_timer,
944 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
946 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
947 mgmt_index_added(hdev->id);
950 static void hci_power_off(struct work_struct *work)
952 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
954 BT_DBG("%s", hdev->name);
956 hci_dev_close(hdev->id);
959 static void hci_auto_off(unsigned long data)
961 struct hci_dev *hdev = (struct hci_dev *) data;
963 BT_DBG("%s", hdev->name);
965 clear_bit(HCI_AUTO_OFF, &hdev->flags);
967 queue_work(hdev->workqueue, &hdev->power_off);
970 void hci_del_off_timer(struct hci_dev *hdev)
972 BT_DBG("%s", hdev->name);
974 clear_bit(HCI_AUTO_OFF, &hdev->flags);
975 del_timer(&hdev->off_timer);
978 int hci_uuids_clear(struct hci_dev *hdev)
980 struct list_head *p, *n;
982 list_for_each_safe(p, n, &hdev->uuids) {
983 struct bt_uuid *uuid;
985 uuid = list_entry(p, struct bt_uuid, list);
987 list_del(p);
988 kfree(uuid);
991 return 0;
994 int hci_link_keys_clear(struct hci_dev *hdev)
996 struct list_head *p, *n;
998 list_for_each_safe(p, n, &hdev->link_keys) {
999 struct link_key *key;
1001 key = list_entry(p, struct link_key, list);
1003 list_del(p);
1004 kfree(key);
1007 return 0;
1010 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1012 struct list_head *p;
1014 list_for_each(p, &hdev->link_keys) {
1015 struct link_key *k;
1017 k = list_entry(p, struct link_key, list);
1019 if (bacmp(bdaddr, &k->bdaddr) == 0)
1020 return k;
1023 return NULL;
1026 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1027 u8 key_type, u8 old_key_type)
1029 /* Legacy key */
1030 if (key_type < 0x03)
1031 return 1;
1033 /* Debug keys are insecure so don't store them persistently */
1034 if (key_type == HCI_LK_DEBUG_COMBINATION)
1035 return 0;
1037 /* Changed combination key and there's no previous one */
1038 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1039 return 0;
1041 /* Security mode 3 case */
1042 if (!conn)
1043 return 1;
1045 /* Neither local nor remote side had no-bonding as requirement */
1046 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1047 return 1;
1049 /* Local side had dedicated bonding as requirement */
1050 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1051 return 1;
1053 /* Remote side had dedicated bonding as requirement */
1054 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1055 return 1;
1057 /* If none of the above criteria match, then don't store the key
1058 * persistently */
1059 return 0;
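/* Persistence rules for link keys: legacy keys (type < 0x03) are always
 * stored, debug combination keys never are, and a changed combination key
 * with no previous key is not stored.  Otherwise the key is kept if there
 * is no connection (security mode 3), if neither side asked for no-bonding,
 * or if either side required dedicated bonding. */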
1062 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1063 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1065 struct link_key *key, *old_key;
1066 u8 old_key_type, persistent;
1068 old_key = hci_find_link_key(hdev, bdaddr);
1069 if (old_key) {
1070 old_key_type = old_key->type;
1071 key = old_key;
1072 } else {
1073 old_key_type = conn ? conn->key_type : 0xff;
1074 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1075 if (!key)
1076 return -ENOMEM;
1077 list_add(&key->list, &hdev->link_keys);
1080 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1082 /* Some buggy controller combinations generate a changed
1083 * combination key for legacy pairing even when there's no
1084 * previous key */
1085 if (type == HCI_LK_CHANGED_COMBINATION &&
1086 (!conn || conn->remote_auth == 0xff) &&
1087 old_key_type == 0xff) {
1088 type = HCI_LK_COMBINATION;
1089 if (conn)
1090 conn->key_type = type;
1093 bacpy(&key->bdaddr, bdaddr);
1094 memcpy(key->val, val, 16);
1095 key->pin_len = pin_len;
1097 if (type == HCI_LK_CHANGED_COMBINATION)
1098 key->type = old_key_type;
1099 else
1100 key->type = type;
1102 if (!new_key)
1103 return 0;
1105 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1107 mgmt_new_key(hdev->id, key, persistent);
1109 if (!persistent) {
1110 list_del(&key->list);
1111 kfree(key);
1114 return 0;
1117 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1119 struct link_key *key;
1121 key = hci_find_link_key(hdev, bdaddr);
1122 if (!key)
1123 return -ENOENT;
1125 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1127 list_del(&key->list);
1128 kfree(key);
1130 return 0;
1133 /* HCI command timer function */
1134 static void hci_cmd_timer(unsigned long arg)
1136 struct hci_dev *hdev = (void *) arg;
1138 BT_ERR("%s command tx timeout", hdev->name);
1139 atomic_set(&hdev->cmd_cnt, 1);
1140 clear_bit(HCI_RESET, &hdev->flags);
1141 tasklet_schedule(&hdev->cmd_task);
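/* If a command gets no response within HCI_CMD_TIMEOUT the timer above
 * restores the command credit (cmd_cnt back to 1), clears HCI_RESET and
 * kicks the command tasklet so the queue does not stall behind an
 * unresponsive controller. */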
1144 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1145 bdaddr_t *bdaddr)
1147 struct oob_data *data;
1149 list_for_each_entry(data, &hdev->remote_oob_data, list)
1150 if (bacmp(bdaddr, &data->bdaddr) == 0)
1151 return data;
1153 return NULL;
1156 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1158 struct oob_data *data;
1160 data = hci_find_remote_oob_data(hdev, bdaddr);
1161 if (!data)
1162 return -ENOENT;
1164 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1166 list_del(&data->list);
1167 kfree(data);
1169 return 0;
1172 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1174 struct oob_data *data, *n;
1176 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1177 list_del(&data->list);
1178 kfree(data);
1181 return 0;
1184 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1185 u8 *randomizer)
1187 struct oob_data *data;
1189 data = hci_find_remote_oob_data(hdev, bdaddr);
1191 if (!data) {
1192 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1193 if (!data)
1194 return -ENOMEM;
1196 bacpy(&data->bdaddr, bdaddr);
1197 list_add(&data->list, &hdev->remote_oob_data);
1200 memcpy(data->hash, hash, sizeof(data->hash));
1201 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1203 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1205 return 0;
1208 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1209 bdaddr_t *bdaddr)
1211 struct list_head *p;
1213 list_for_each(p, &hdev->blacklist) {
1214 struct bdaddr_list *b;
1216 b = list_entry(p, struct bdaddr_list, list);
1218 if (bacmp(bdaddr, &b->bdaddr) == 0)
1219 return b;
1222 return NULL;
1225 int hci_blacklist_clear(struct hci_dev *hdev)
1227 struct list_head *p, *n;
1229 list_for_each_safe(p, n, &hdev->blacklist) {
1230 struct bdaddr_list *b;
1232 b = list_entry(p, struct bdaddr_list, list);
1234 list_del(p);
1235 kfree(b);
1238 return 0;
1241 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1243 struct bdaddr_list *entry;
1244 int err;
1246 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1247 return -EBADF;
1249 hci_dev_lock(hdev);
1251 if (hci_blacklist_lookup(hdev, bdaddr)) {
1252 err = -EEXIST;
1253 goto err;
1256 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1257 if (!entry) {
1258 err = -ENOMEM;
1259 goto err;
1262 bacpy(&entry->bdaddr, bdaddr);
1264 list_add(&entry->list, &hdev->blacklist);
1266 err = 0;
1268 err:
1269 hci_dev_unlock(hdev);
1270 return err;
1273 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1275 struct bdaddr_list *entry;
1276 int err = 0;
1278 hci_dev_lock(hdev);
1280 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1281 hci_blacklist_clear(hdev);
1282 goto done;
1285 entry = hci_blacklist_lookup(hdev, bdaddr);
1286 if (!entry) {
1287 err = -ENOENT;
1288 goto done;
1291 list_del(&entry->list);
1292 kfree(entry);
1294 done:
1295 hci_dev_unlock(hdev);
1296 return err;
1299 static void hci_clear_adv_cache(unsigned long arg)
1301 struct hci_dev *hdev = (void *) arg;
1303 hci_dev_lock(hdev);
1305 hci_adv_entries_clear(hdev);
1307 hci_dev_unlock(hdev);
1310 int hci_adv_entries_clear(struct hci_dev *hdev)
1312 struct adv_entry *entry, *tmp;
1314 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1315 list_del(&entry->list);
1316 kfree(entry);
1319 BT_DBG("%s adv cache cleared", hdev->name);
1321 return 0;
1324 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1326 struct adv_entry *entry;
1328 list_for_each_entry(entry, &hdev->adv_entries, list)
1329 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1330 return entry;
1332 return NULL;
1335 static inline int is_connectable_adv(u8 evt_type)
1337 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1338 return 1;
1340 return 0;
1343 int hci_add_adv_entry(struct hci_dev *hdev,
1344 struct hci_ev_le_advertising_info *ev)
1346 struct adv_entry *entry;
1348 if (!is_connectable_adv(ev->evt_type))
1349 return -EINVAL;
1351 /* Only new entries should be added to adv_entries. So, if
1352 * bdaddr was found, don't add it. */
1353 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1354 return 0;
1356 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1357 if (!entry)
1358 return -ENOMEM;
1360 bacpy(&entry->bdaddr, &ev->bdaddr);
1361 entry->bdaddr_type = ev->bdaddr_type;
1363 list_add(&entry->list, &hdev->adv_entries);
1365 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1366 batostr(&entry->bdaddr), entry->bdaddr_type);
1368 return 0;
1371 static struct crypto_blkcipher *alloc_cypher(void)
1373 if (enable_smp)
1374 return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1376 return ERR_PTR(-ENOTSUPP);
1379 /* Register HCI device */
1380 int hci_register_dev(struct hci_dev *hdev)
1382 struct list_head *head = &hci_dev_list, *p;
1383 int i, id = 0;
1385 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1386 hdev->bus, hdev->owner);
1388 if (!hdev->open || !hdev->close || !hdev->destruct)
1389 return -EINVAL;
1391 write_lock_bh(&hci_dev_list_lock);
1393 /* Find first available device id */
1394 list_for_each(p, &hci_dev_list) {
1395 if (list_entry(p, struct hci_dev, list)->id != id)
1396 break;
1397 head = p; id++;
1400 sprintf(hdev->name, "hci%d", id);
1401 hdev->id = id;
1402 list_add(&hdev->list, head);
1404 atomic_set(&hdev->refcnt, 1);
1405 spin_lock_init(&hdev->lock);
1407 hdev->flags = 0;
1408 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1409 hdev->esco_type = (ESCO_HV1);
1410 hdev->link_mode = (HCI_LM_ACCEPT);
1411 hdev->io_capability = 0x03; /* No Input No Output */
1413 hdev->idle_timeout = 0;
1414 hdev->sniff_max_interval = 800;
1415 hdev->sniff_min_interval = 80;
1417 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1418 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1419 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1421 skb_queue_head_init(&hdev->rx_q);
1422 skb_queue_head_init(&hdev->cmd_q);
1423 skb_queue_head_init(&hdev->raw_q);
1425 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1427 for (i = 0; i < NUM_REASSEMBLY; i++)
1428 hdev->reassembly[i] = NULL;
1430 init_waitqueue_head(&hdev->req_wait_q);
1431 mutex_init(&hdev->req_lock);
1433 inquiry_cache_init(hdev);
1435 hci_conn_hash_init(hdev);
1437 INIT_LIST_HEAD(&hdev->blacklist);
1439 INIT_LIST_HEAD(&hdev->uuids);
1441 INIT_LIST_HEAD(&hdev->link_keys);
1443 INIT_LIST_HEAD(&hdev->remote_oob_data);
1445 INIT_LIST_HEAD(&hdev->adv_entries);
1446 setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
1447 (unsigned long) hdev);
1449 INIT_WORK(&hdev->power_on, hci_power_on);
1450 INIT_WORK(&hdev->power_off, hci_power_off);
1451 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1453 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1455 atomic_set(&hdev->promisc, 0);
1457 write_unlock_bh(&hci_dev_list_lock);
1459 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1460 if (!hdev->workqueue)
1461 goto nomem;
1463 hdev->tfm = alloc_cypher();
1464 if (IS_ERR(hdev->tfm))
1465 BT_INFO("Failed to load transform for ecb(aes): %ld",
1466 PTR_ERR(hdev->tfm));
1468 hci_register_sysfs(hdev);
1470 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1471 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1472 if (hdev->rfkill) {
1473 if (rfkill_register(hdev->rfkill) < 0) {
1474 rfkill_destroy(hdev->rfkill);
1475 hdev->rfkill = NULL;
1479 set_bit(HCI_AUTO_OFF, &hdev->flags);
1480 set_bit(HCI_SETUP, &hdev->flags);
1481 queue_work(hdev->workqueue, &hdev->power_on);
1483 hci_notify(hdev, HCI_DEV_REG);
1485 return id;
1487 nomem:
1488 write_lock_bh(&hci_dev_list_lock);
1489 list_del(&hdev->list);
1490 write_unlock_bh(&hci_dev_list_lock);
1492 return -ENOMEM;
1494 EXPORT_SYMBOL(hci_register_dev);
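/* Registration assigns the first free index (device named "hci%d"), sets
 * conservative defaults (DM1/DH1/HV1 packet types, sniff intervals, accept
 * link mode), wires up the rx/tx/cmd tasklets, timers and a per-device
 * workqueue, registers sysfs and rfkill, and queues power_on so the
 * controller is brought up and then auto-powered-off after AUTO_OFF_TIMEOUT
 * unless the management interface takes over (HCI_AUTO_OFF / HCI_SETUP).
 *
 * Illustrative sketch of how a transport driver uses this (the my_*
 * callbacks are hypothetical; open, close and destruct are mandatory):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->flush    = my_flush;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	hdev->owner    = THIS_MODULE;
 *
 *	err = hci_register_dev(hdev);
 */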
1496 /* Unregister HCI device */
1497 int hci_unregister_dev(struct hci_dev *hdev)
1499 int i;
1501 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1503 write_lock_bh(&hci_dev_list_lock);
1504 list_del(&hdev->list);
1505 write_unlock_bh(&hci_dev_list_lock);
1507 hci_dev_do_close(hdev);
1509 for (i = 0; i < NUM_REASSEMBLY; i++)
1510 kfree_skb(hdev->reassembly[i]);
1512 if (!test_bit(HCI_INIT, &hdev->flags) &&
1513 !test_bit(HCI_SETUP, &hdev->flags))
1514 mgmt_index_removed(hdev->id);
1516 if (!IS_ERR(hdev->tfm))
1517 crypto_free_blkcipher(hdev->tfm);
1519 hci_notify(hdev, HCI_DEV_UNREG);
1521 if (hdev->rfkill) {
1522 rfkill_unregister(hdev->rfkill);
1523 rfkill_destroy(hdev->rfkill);
1526 hci_unregister_sysfs(hdev);
1528 hci_del_off_timer(hdev);
1529 del_timer(&hdev->adv_timer);
1531 destroy_workqueue(hdev->workqueue);
1533 hci_dev_lock_bh(hdev);
1534 hci_blacklist_clear(hdev);
1535 hci_uuids_clear(hdev);
1536 hci_link_keys_clear(hdev);
1537 hci_remote_oob_data_clear(hdev);
1538 hci_adv_entries_clear(hdev);
1539 hci_dev_unlock_bh(hdev);
1541 __hci_dev_put(hdev);
1543 return 0;
1545 EXPORT_SYMBOL(hci_unregister_dev);
1547 /* Suspend HCI device */
1548 int hci_suspend_dev(struct hci_dev *hdev)
1550 hci_notify(hdev, HCI_DEV_SUSPEND);
1551 return 0;
1553 EXPORT_SYMBOL(hci_suspend_dev);
1555 /* Resume HCI device */
1556 int hci_resume_dev(struct hci_dev *hdev)
1558 hci_notify(hdev, HCI_DEV_RESUME);
1559 return 0;
1561 EXPORT_SYMBOL(hci_resume_dev);
1563 /* Receive frame from HCI drivers */
1564 int hci_recv_frame(struct sk_buff *skb)
1566 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1567 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1568 && !test_bit(HCI_INIT, &hdev->flags))) {
1569 kfree_skb(skb);
1570 return -ENXIO;
1573 /* Incoming skb */
1574 bt_cb(skb)->incoming = 1;
1576 /* Time stamp */
1577 __net_timestamp(skb);
1579 /* Queue frame for rx task */
1580 skb_queue_tail(&hdev->rx_q, skb);
1581 tasklet_schedule(&hdev->rx_task);
1583 return 0;
1585 EXPORT_SYMBOL(hci_recv_frame);
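/* RX entry point for drivers: a complete HCI frame is handed in with
 * bt_cb(skb)->pkt_type set and skb->dev pointing at the hci_dev.  Frames
 * are dropped unless the device is up or still initializing; accepted
 * frames are marked incoming, timestamped and queued on rx_q for the RX
 * tasklet. */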
1587 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1588 int count, __u8 index)
1590 int len = 0;
1591 int hlen = 0;
1592 int remain = count;
1593 struct sk_buff *skb;
1594 struct bt_skb_cb *scb;
1596 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1597 index >= NUM_REASSEMBLY)
1598 return -EILSEQ;
1600 skb = hdev->reassembly[index];
1602 if (!skb) {
1603 switch (type) {
1604 case HCI_ACLDATA_PKT:
1605 len = HCI_MAX_FRAME_SIZE;
1606 hlen = HCI_ACL_HDR_SIZE;
1607 break;
1608 case HCI_EVENT_PKT:
1609 len = HCI_MAX_EVENT_SIZE;
1610 hlen = HCI_EVENT_HDR_SIZE;
1611 break;
1612 case HCI_SCODATA_PKT:
1613 len = HCI_MAX_SCO_SIZE;
1614 hlen = HCI_SCO_HDR_SIZE;
1615 break;
1618 skb = bt_skb_alloc(len, GFP_ATOMIC);
1619 if (!skb)
1620 return -ENOMEM;
1622 scb = (void *) skb->cb;
1623 scb->expect = hlen;
1624 scb->pkt_type = type;
1626 skb->dev = (void *) hdev;
1627 hdev->reassembly[index] = skb;
1630 while (count) {
1631 scb = (void *) skb->cb;
1632 len = min(scb->expect, (__u16)count);
1634 memcpy(skb_put(skb, len), data, len);
1636 count -= len;
1637 data += len;
1638 scb->expect -= len;
1639 remain = count;
1641 switch (type) {
1642 case HCI_EVENT_PKT:
1643 if (skb->len == HCI_EVENT_HDR_SIZE) {
1644 struct hci_event_hdr *h = hci_event_hdr(skb);
1645 scb->expect = h->plen;
1647 if (skb_tailroom(skb) < scb->expect) {
1648 kfree_skb(skb);
1649 hdev->reassembly[index] = NULL;
1650 return -ENOMEM;
1653 break;
1655 case HCI_ACLDATA_PKT:
1656 if (skb->len == HCI_ACL_HDR_SIZE) {
1657 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1658 scb->expect = __le16_to_cpu(h->dlen);
1660 if (skb_tailroom(skb) < scb->expect) {
1661 kfree_skb(skb);
1662 hdev->reassembly[index] = NULL;
1663 return -ENOMEM;
1666 break;
1668 case HCI_SCODATA_PKT:
1669 if (skb->len == HCI_SCO_HDR_SIZE) {
1670 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1671 scb->expect = h->dlen;
1673 if (skb_tailroom(skb) < scb->expect) {
1674 kfree_skb(skb);
1675 hdev->reassembly[index] = NULL;
1676 return -ENOMEM;
1679 break;
1682 if (scb->expect == 0) {
1683 /* Complete frame */
1685 bt_cb(skb)->pkt_type = type;
1686 hci_recv_frame(skb);
1688 hdev->reassembly[index] = NULL;
1689 return remain;
1693 return remain;
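/* Reassembly keeps one partially built skb per slot in hdev->reassembly[].
 * For a new frame it allocates a maximum-size skb and first expects just
 * the packet header; once the header is complete the real payload length
 * (plen/dlen) is read from it and 'expect' is extended accordingly.  When
 * 'expect' reaches zero the finished frame is passed to hci_recv_frame().
 * The return value is the number of input bytes not yet consumed, or a
 * negative error. */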
1696 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1698 int rem = 0;
1700 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1701 return -EILSEQ;
1703 while (count) {
1704 rem = hci_reassembly(hdev, type, data, count, type - 1);
1705 if (rem < 0)
1706 return rem;
1708 data += (count - rem);
1709 count = rem;
1712 return rem;
1714 EXPORT_SYMBOL(hci_recv_fragment);
1716 #define STREAM_REASSEMBLY 0
1718 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1720 int type;
1721 int rem = 0;
1723 while (count) {
1724 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1726 if (!skb) {
1727 struct { char type; } *pkt;
1729 /* Start of the frame */
1730 pkt = data;
1731 type = pkt->type;
1733 data++;
1734 count--;
1735 } else
1736 type = bt_cb(skb)->pkt_type;
1738 rem = hci_reassembly(hdev, type, data, count,
1739 STREAM_REASSEMBLY);
1740 if (rem < 0)
1741 return rem;
1743 data += (count - rem);
1744 count = rem;
1747 return rem;
1749 EXPORT_SYMBOL(hci_recv_stream_fragment);
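/* hci_recv_fragment() is used by drivers that already know the packet type
 * of each chunk (one reassembly slot per type), while
 * hci_recv_stream_fragment() handles a raw byte stream in which every frame
 * starts with a packet-type byte, as on UART-style transports, using the
 * dedicated STREAM_REASSEMBLY slot.  Both loop until the input is consumed
 * or an error is returned. */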
1751 /* ---- Interface to upper protocols ---- */
1753 /* Register/Unregister protocols.
1754 * hci_task_lock is used to ensure that no tasks are running. */
1755 int hci_register_proto(struct hci_proto *hp)
1757 int err = 0;
1759 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1761 if (hp->id >= HCI_MAX_PROTO)
1762 return -EINVAL;
1764 write_lock_bh(&hci_task_lock);
1766 if (!hci_proto[hp->id])
1767 hci_proto[hp->id] = hp;
1768 else
1769 err = -EEXIST;
1771 write_unlock_bh(&hci_task_lock);
1773 return err;
1775 EXPORT_SYMBOL(hci_register_proto);
1777 int hci_unregister_proto(struct hci_proto *hp)
1779 int err = 0;
1781 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1783 if (hp->id >= HCI_MAX_PROTO)
1784 return -EINVAL;
1786 write_lock_bh(&hci_task_lock);
1788 if (hci_proto[hp->id])
1789 hci_proto[hp->id] = NULL;
1790 else
1791 err = -ENOENT;
1793 write_unlock_bh(&hci_task_lock);
1795 return err;
1797 EXPORT_SYMBOL(hci_unregister_proto);
1799 int hci_register_cb(struct hci_cb *cb)
1801 BT_DBG("%p name %s", cb, cb->name);
1803 write_lock_bh(&hci_cb_list_lock);
1804 list_add(&cb->list, &hci_cb_list);
1805 write_unlock_bh(&hci_cb_list_lock);
1807 return 0;
1809 EXPORT_SYMBOL(hci_register_cb);
1811 int hci_unregister_cb(struct hci_cb *cb)
1813 BT_DBG("%p name %s", cb, cb->name);
1815 write_lock_bh(&hci_cb_list_lock);
1816 list_del(&cb->list);
1817 write_unlock_bh(&hci_cb_list_lock);
1819 return 0;
1821 EXPORT_SYMBOL(hci_unregister_cb);
1823 static int hci_send_frame(struct sk_buff *skb)
1825 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1827 if (!hdev) {
1828 kfree_skb(skb);
1829 return -ENODEV;
1832 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1834 if (atomic_read(&hdev->promisc)) {
1835 /* Time stamp */
1836 __net_timestamp(skb);
1838 hci_send_to_sock(hdev, skb, NULL);
1841 /* Get rid of skb owner, prior to sending to the driver. */
1842 skb_orphan(skb);
1844 return hdev->send(skb);
1847 /* Send HCI command */
1848 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1850 int len = HCI_COMMAND_HDR_SIZE + plen;
1851 struct hci_command_hdr *hdr;
1852 struct sk_buff *skb;
1854 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1856 skb = bt_skb_alloc(len, GFP_ATOMIC);
1857 if (!skb) {
1858 BT_ERR("%s no memory for command", hdev->name);
1859 return -ENOMEM;
1862 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1863 hdr->opcode = cpu_to_le16(opcode);
1864 hdr->plen = plen;
1866 if (plen)
1867 memcpy(skb_put(skb, plen), param, plen);
1869 BT_DBG("skb len %d", skb->len);
1871 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1872 skb->dev = (void *) hdev;
1874 if (test_bit(HCI_INIT, &hdev->flags))
1875 hdev->init_last_cmd = opcode;
1877 skb_queue_tail(&hdev->cmd_q, skb);
1878 tasklet_schedule(&hdev->cmd_task);
1880 return 0;
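/* hci_send_cmd() builds a command packet (little-endian 16-bit opcode plus
 * parameter length and parameters), tags it HCI_COMMAND_PKT, remembers the
 * opcode in init_last_cmd while HCI_INIT is set so hci_req_complete() can
 * match it, and queues it on cmd_q; actual transmission is paced by
 * cmd_cnt in hci_cmd_task(). */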
1883 /* Get data from the previously sent command */
1884 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1886 struct hci_command_hdr *hdr;
1888 if (!hdev->sent_cmd)
1889 return NULL;
1891 hdr = (void *) hdev->sent_cmd->data;
1893 if (hdr->opcode != cpu_to_le16(opcode))
1894 return NULL;
1896 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1898 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1901 /* Send ACL data */
1902 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1904 struct hci_acl_hdr *hdr;
1905 int len = skb->len;
1907 skb_push(skb, HCI_ACL_HDR_SIZE);
1908 skb_reset_transport_header(skb);
1909 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1910 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1911 hdr->dlen = cpu_to_le16(len);
1914 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1916 struct hci_dev *hdev = conn->hdev;
1917 struct sk_buff *list;
1919 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1921 skb->dev = (void *) hdev;
1922 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1923 hci_add_acl_hdr(skb, conn->handle, flags);
1925 list = skb_shinfo(skb)->frag_list;
1926 if (!list) {
1927 /* Non fragmented */
1928 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1930 skb_queue_tail(&conn->data_q, skb);
1931 } else {
1932 /* Fragmented */
1933 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1935 skb_shinfo(skb)->frag_list = NULL;
1937 /* Queue all fragments atomically */
1938 spin_lock_bh(&conn->data_q.lock);
1940 __skb_queue_tail(&conn->data_q, skb);
1942 flags &= ~ACL_START;
1943 flags |= ACL_CONT;
1944 do {
1945 skb = list; list = list->next;
1947 skb->dev = (void *) hdev;
1948 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1949 hci_add_acl_hdr(skb, conn->handle, flags);
1951 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1953 __skb_queue_tail(&conn->data_q, skb);
1954 } while (list);
1956 spin_unlock_bh(&conn->data_q.lock);
1959 tasklet_schedule(&hdev->tx_task);
1961 EXPORT_SYMBOL(hci_send_acl);
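/* hci_send_acl() prepends the ACL header (connection handle and packet
 * boundary/broadcast flags packed into 16 bits, plus the data length).  If
 * the skb carries a frag_list, the head keeps the caller's flags and every
 * following fragment is re-headed with ACL_CONT; the whole sequence is
 * queued on the connection's data_q under its lock so fragments stay
 * contiguous, and the TX tasklet is scheduled. */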
1963 /* Send SCO data */
1964 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1966 struct hci_dev *hdev = conn->hdev;
1967 struct hci_sco_hdr hdr;
1969 BT_DBG("%s len %d", hdev->name, skb->len);
1971 hdr.handle = cpu_to_le16(conn->handle);
1972 hdr.dlen = skb->len;
1974 skb_push(skb, HCI_SCO_HDR_SIZE);
1975 skb_reset_transport_header(skb);
1976 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1978 skb->dev = (void *) hdev;
1979 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1981 skb_queue_tail(&conn->data_q, skb);
1982 tasklet_schedule(&hdev->tx_task);
1984 EXPORT_SYMBOL(hci_send_sco);
1986 /* ---- HCI TX task (outgoing data) ---- */
1988 /* HCI Connection scheduler */
1989 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1991 struct hci_conn_hash *h = &hdev->conn_hash;
1992 struct hci_conn *conn = NULL;
1993 int num = 0, min = ~0;
1994 struct list_head *p;
1996 /* We don't have to lock device here. Connections are always
1997 * added and removed with TX task disabled. */
1998 list_for_each(p, &h->list) {
1999 struct hci_conn *c;
2000 c = list_entry(p, struct hci_conn, list);
2002 if (c->type != type || skb_queue_empty(&c->data_q))
2003 continue;
2005 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2006 continue;
2008 num++;
2010 if (c->sent < min) {
2011 min = c->sent;
2012 conn = c;
2016 if (conn) {
2017 int cnt, q;
2019 switch (conn->type) {
2020 case ACL_LINK:
2021 cnt = hdev->acl_cnt;
2022 break;
2023 case SCO_LINK:
2024 case ESCO_LINK:
2025 cnt = hdev->sco_cnt;
2026 break;
2027 case LE_LINK:
2028 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2029 break;
2030 default:
2031 cnt = 0;
2032 BT_ERR("Unknown link type");
2035 q = cnt / num;
2036 *quote = q ? q : 1;
2037 } else
2038 *quote = 0;
2040 BT_DBG("conn %p quote %d", conn, *quote);
2041 return conn;
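/* Connection scheduler: among connections of the requested link type that
 * have queued data and are in BT_CONNECTED or BT_CONFIG state, pick the one
 * with the fewest packets in flight ('sent').  The quote is the number of
 * free controller buffers for that link type divided by the number of
 * ready connections (at least 1), giving each connection a fair share per
 * scheduling round. */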
2044 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2046 struct hci_conn_hash *h = &hdev->conn_hash;
2047 struct list_head *p;
2048 struct hci_conn *c;
2050 BT_ERR("%s link tx timeout", hdev->name);
2052 /* Kill stalled connections */
2053 list_for_each(p, &h->list) {
2054 c = list_entry(p, struct hci_conn, list);
2055 if (c->type == type && c->sent) {
2056 BT_ERR("%s killing stalled connection %s",
2057 hdev->name, batostr(&c->dst));
2058 hci_acl_disconn(c, 0x13);
2063 static inline void hci_sched_acl(struct hci_dev *hdev)
2065 struct hci_conn *conn;
2066 struct sk_buff *skb;
2067 int quote;
2069 BT_DBG("%s", hdev->name);
2071 if (!test_bit(HCI_RAW, &hdev->flags)) {
2072 /* ACL tx timeout must be longer than maximum
2073 * link supervision timeout (40.9 seconds) */
2074 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2075 hci_link_tx_to(hdev, ACL_LINK);
2078 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2079 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2080 BT_DBG("skb %p len %d", skb, skb->len);
2082 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2084 hci_send_frame(skb);
2085 hdev->acl_last_tx = jiffies;
2087 hdev->acl_cnt--;
2088 conn->sent++;
2093 /* Schedule SCO */
2094 static inline void hci_sched_sco(struct hci_dev *hdev)
2096 struct hci_conn *conn;
2097 struct sk_buff *skb;
2098 int quote;
2100 BT_DBG("%s", hdev->name);
2102 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2103 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2104 BT_DBG("skb %p len %d", skb, skb->len);
2105 hci_send_frame(skb);
2107 conn->sent++;
2108 if (conn->sent == ~0)
2109 conn->sent = 0;
2114 static inline void hci_sched_esco(struct hci_dev *hdev)
2116 struct hci_conn *conn;
2117 struct sk_buff *skb;
2118 int quote;
2120 BT_DBG("%s", hdev->name);
2122 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2123 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2124 BT_DBG("skb %p len %d", skb, skb->len);
2125 hci_send_frame(skb);
2127 conn->sent++;
2128 if (conn->sent == ~0)
2129 conn->sent = 0;
2134 static inline void hci_sched_le(struct hci_dev *hdev)
2136 struct hci_conn *conn;
2137 struct sk_buff *skb;
2138 int quote, cnt;
2140 BT_DBG("%s", hdev->name);
2142 if (!test_bit(HCI_RAW, &hdev->flags)) {
2143 /* LE tx timeout must be longer than maximum
2144 * link supervision timeout (40.9 seconds) */
2145 if (!hdev->le_cnt && hdev->le_pkts &&
2146 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2147 hci_link_tx_to(hdev, LE_LINK);
2150 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2151 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
2152 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2153 BT_DBG("skb %p len %d", skb, skb->len);
2155 hci_send_frame(skb);
2156 hdev->le_last_tx = jiffies;
2158 cnt--;
2159 conn->sent++;
2162 if (hdev->le_pkts)
2163 hdev->le_cnt = cnt;
2164 else
2165 hdev->acl_cnt = cnt;
2168 static void hci_tx_task(unsigned long arg)
2170 struct hci_dev *hdev = (struct hci_dev *) arg;
2171 struct sk_buff *skb;
2173 read_lock(&hci_task_lock);
2175 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2176 hdev->sco_cnt, hdev->le_cnt);
2178 /* Schedule queues and send stuff to HCI driver */
2180 hci_sched_acl(hdev);
2182 hci_sched_sco(hdev);
2184 hci_sched_esco(hdev);
2186 hci_sched_le(hdev);
2188 /* Send next queued raw (unknown type) packet */
2189 while ((skb = skb_dequeue(&hdev->raw_q)))
2190 hci_send_frame(skb);
2192 read_unlock(&hci_task_lock);
2195 /* ----- HCI RX task (incoming data processing) ----- */
2197 /* ACL data packet */
2198 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2200 struct hci_acl_hdr *hdr = (void *) skb->data;
2201 struct hci_conn *conn;
2202 __u16 handle, flags;
2204 skb_pull(skb, HCI_ACL_HDR_SIZE);
2206 handle = __le16_to_cpu(hdr->handle);
2207 flags = hci_flags(handle);
2208 handle = hci_handle(handle);
2210 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2212 hdev->stat.acl_rx++;
2214 hci_dev_lock(hdev);
2215 conn = hci_conn_hash_lookup_handle(hdev, handle);
2216 hci_dev_unlock(hdev);
2218 if (conn) {
2219 register struct hci_proto *hp;
2221 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2223 /* Send to upper protocol */
2224 hp = hci_proto[HCI_PROTO_L2CAP];
2225 if (hp && hp->recv_acldata) {
2226 hp->recv_acldata(conn, skb, flags);
2227 return;
2229 } else {
2230 BT_ERR("%s ACL packet for unknown connection handle %d",
2231 hdev->name, handle);
2234 kfree_skb(skb);
2237 /* SCO data packet */
2238 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2240 struct hci_sco_hdr *hdr = (void *) skb->data;
2241 struct hci_conn *conn;
2242 __u16 handle;
2244 skb_pull(skb, HCI_SCO_HDR_SIZE);
2246 handle = __le16_to_cpu(hdr->handle);
2248 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2250 hdev->stat.sco_rx++;
2252 hci_dev_lock(hdev);
2253 conn = hci_conn_hash_lookup_handle(hdev, handle);
2254 hci_dev_unlock(hdev);
2256 if (conn) {
2257 register struct hci_proto *hp;
2259 /* Send to upper protocol */
2260 hp = hci_proto[HCI_PROTO_SCO];
2261 if (hp && hp->recv_scodata) {
2262 hp->recv_scodata(conn, skb);
2263 return;
2265 } else {
2266 BT_ERR("%s SCO packet for unknown connection handle %d",
2267 hdev->name, handle);
2270 kfree_skb(skb);
2273 static void hci_rx_task(unsigned long arg)
2275 struct hci_dev *hdev = (struct hci_dev *) arg;
2276 struct sk_buff *skb;
2278 BT_DBG("%s", hdev->name);
2280 read_lock(&hci_task_lock);
2282 while ((skb = skb_dequeue(&hdev->rx_q))) {
2283 if (atomic_read(&hdev->promisc)) {
2284 /* Send copy to the sockets */
2285 hci_send_to_sock(hdev, skb, NULL);
2288 if (test_bit(HCI_RAW, &hdev->flags)) {
2289 kfree_skb(skb);
2290 continue;
2293 if (test_bit(HCI_INIT, &hdev->flags)) {
2294 /* Don't process data packets in this state. */
2295 switch (bt_cb(skb)->pkt_type) {
2296 case HCI_ACLDATA_PKT:
2297 case HCI_SCODATA_PKT:
2298 kfree_skb(skb);
2299 continue;
2303 /* Process frame */
2304 switch (bt_cb(skb)->pkt_type) {
2305 case HCI_EVENT_PKT:
2306 hci_event_packet(hdev, skb);
2307 break;
2309 case HCI_ACLDATA_PKT:
2310 BT_DBG("%s ACL data packet", hdev->name);
2311 hci_acldata_packet(hdev, skb);
2312 break;
2314 case HCI_SCODATA_PKT:
2315 BT_DBG("%s SCO data packet", hdev->name);
2316 hci_scodata_packet(hdev, skb);
2317 break;
2319 default:
2320 kfree_skb(skb);
2321 break;
2325 read_unlock(&hci_task_lock);
2328 static void hci_cmd_task(unsigned long arg)
2330 struct hci_dev *hdev = (struct hci_dev *) arg;
2331 struct sk_buff *skb;
2333 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2335 /* Send queued commands */
2336 if (atomic_read(&hdev->cmd_cnt)) {
2337 skb = skb_dequeue(&hdev->cmd_q);
2338 if (!skb)
2339 return;
2341 kfree_skb(hdev->sent_cmd);
2343 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2344 if (hdev->sent_cmd) {
2345 atomic_dec(&hdev->cmd_cnt);
2346 hci_send_frame(skb);
2347 mod_timer(&hdev->cmd_timer,
2348 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2349 } else {
2350 skb_queue_head(&hdev->cmd_q, skb);
2351 tasklet_schedule(&hdev->cmd_task);
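/* The command tasklet sends at most one command per available credit:
 * cmd_cnt is the controller's outstanding-command allowance (reset to 1 on
 * open and on timeout), the skb is cloned into sent_cmd so the event
 * handler can fetch its parameters via hci_sent_cmd_data(), and cmd_timer
 * is armed to catch controllers that never answer.  If the clone fails the
 * command is pushed back and the tasklet rescheduled. */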
2356 module_param(enable_smp, bool, 0644);
2357 MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");