/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	4

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

struct pending_cmd {
	struct list_head list;
	u16 opcode;
	int index;
	void *param;
	struct sock *sk;
	void *user_data;
};

/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_NOT_PAIRED,		/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
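
/* Convert an HCI status code into the corresponding MGMT status using the
 * table above; anything outside the table maps to MGMT_STATUS_FAILED.
 */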
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}
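
/* Queue a Command Status event back to the socket that issued the mgmt
 * command; cmd_complete() below does the same for Command Complete events,
 * optionally carrying return parameters.
 */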
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);

	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
			    sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = __constant_cpu_to_le16(num_commands);
	rp->num_events = __constant_cpu_to_le16(num_events);

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
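
/* Build the Read Index List response: one index per registered BR/EDR
 * controller, skipping controllers still in setup or bound to a user
 * channel.
 */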
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
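
/* Compute the settings masks reported by Read Controller Info:
 * get_supported_settings() from the controller capabilities and
 * get_current_settings() from the current device flags.
 */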
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	return settings;
}

#define PNP_INFO_SVCLASS_ID	0x1200
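
/* The following three helpers fill in EIR service class UUID lists (16-,
 * 32- and 128-bit) from hdev->uuids, downgrading the list type to
 * "incomplete" when the remaining buffer space runs out.
 */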
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}
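
/* Build LE scan response data carrying the (possibly shortened) local
 * name; update_scan_rsp_data() only sends it to the controller when it
 * differs from what is currently cached.
 */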
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}

static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
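
/* Assemble the Extended Inquiry Response payload: local name, inquiry TX
 * power, Device ID record and the service class UUID lists built above.
 */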
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
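
/* First contact from a mgmt socket for this controller: mark it as
 * mgmt-controlled, set up the service cache work and require user space
 * to explicitly enable pairing.
 */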
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}

static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}

static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}

static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
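
/* Set Powered command handler. Power changes complete asynchronously, so
 * the request is kept on the pending list until the power_on/power_off
 * work (or mgmt_powered()) reports the result.
 */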
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
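
/* Broadcast a mgmt event to all management sockets, optionally skipping
 * the socket that triggered it.
 */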
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}
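
/* Completion handler for the Set Discoverable HCI request: update the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout and refresh the
 * class of device so the limited discoverable bit stays correct.
 */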
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
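
/* Switch page scan parameters for fast connectable mode: interlaced
 * scanning with a 160 msec interval when enabled, the default 1.28 sec
 * standard scan otherwise. HCI commands are only issued when the values
 * actually change.
 */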
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	acp.window = __constant_cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static u8 get_adv_type(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;
	bool connectable;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		connectable = !!cp->val;
	} else {
		connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	}

	return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
}

static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 enable = 0x01;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = get_adv_type(hdev);
	cp.own_address_type = hdev->own_addr_type;
	cp.channel_map = 0x07;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		changed = true;

	if (val) {
		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed)
		return new_settings(hdev, sk);

	return 0;
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
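
/* Set Low Energy command handler. LE is toggled through the HCI Write LE
 * Host Supported command; when disabling, any active advertising is
 * turned off first.
 */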
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		err = hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
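
/* Load Link Keys command handler: validate the variable-length key list,
 * replace the stored BR/EDR link keys and update the debug keys flag.
 */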
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	u16 key_count, expected_len;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       len, expected_len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
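
/* Unpair Device: remove the stored link key or LTK and, when requested and
 * a connection exists, disconnect it before emitting Device Unpaired.
 */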
2265 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2266 u8 addr_type, struct sock *skip_sk)
2268 struct mgmt_ev_device_unpaired ev;
2270 bacpy(&ev.addr.bdaddr, bdaddr);
2271 ev.addr.type = addr_type;
2273 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2274 skip_sk);
2277 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2278 u16 len)
2280 struct mgmt_cp_unpair_device *cp = data;
2281 struct mgmt_rp_unpair_device rp;
2282 struct hci_cp_disconnect dc;
2283 struct pending_cmd *cmd;
2284 struct hci_conn *conn;
2285 int err;
2287 memset(&rp, 0, sizeof(rp));
2288 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2289 rp.addr.type = cp->addr.type;
2291 if (!bdaddr_type_is_valid(cp->addr.type))
2292 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2293 MGMT_STATUS_INVALID_PARAMS,
2294 &rp, sizeof(rp));
2296 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2297 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2298 MGMT_STATUS_INVALID_PARAMS,
2299 &rp, sizeof(rp));
2301 hci_dev_lock(hdev);
2303 if (!hdev_is_powered(hdev)) {
2304 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2305 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2306 goto unlock;
2309 if (cp->addr.type == BDADDR_BREDR)
2310 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2311 else
2312 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
2314 if (err < 0) {
2315 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2316 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2317 goto unlock;
2320 if (cp->disconnect) {
2321 if (cp->addr.type == BDADDR_BREDR)
2322 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2323 &cp->addr.bdaddr);
2324 else
2325 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2326 &cp->addr.bdaddr);
2327 } else {
2328 conn = NULL;
2331 if (!conn) {
2332 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2333 &rp, sizeof(rp));
2334 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2335 goto unlock;
2338 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2339 sizeof(*cp));
2340 if (!cmd) {
2341 err = -ENOMEM;
2342 goto unlock;
2345 dc.handle = cpu_to_le16(conn->handle);
2346 dc.reason = 0x13; /* Remote User Terminated Connection */
2347 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2348 if (err < 0)
2349 mgmt_pending_remove(cmd);
2351 unlock:
2352 hci_dev_unlock(hdev);
2353 return err;
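/* Disconnect: looks up the ACL or LE connection for the given address
 * and sends HCI Disconnect with reason "Remote User Terminated
 * Connection". The command is tracked as pending so that the reply can
 * be sent from the disconnect complete handler.
 */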
2356 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2357 u16 len)
2359 struct mgmt_cp_disconnect *cp = data;
2360 struct mgmt_rp_disconnect rp;
2361 struct hci_cp_disconnect dc;
2362 struct pending_cmd *cmd;
2363 struct hci_conn *conn;
2364 int err;
2366 BT_DBG("");
2368 memset(&rp, 0, sizeof(rp));
2369 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2370 rp.addr.type = cp->addr.type;
2372 if (!bdaddr_type_is_valid(cp->addr.type))
2373 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2374 MGMT_STATUS_INVALID_PARAMS,
2375 &rp, sizeof(rp));
2377 hci_dev_lock(hdev);
2379 if (!test_bit(HCI_UP, &hdev->flags)) {
2380 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2381 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2382 goto failed;
2385 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2386 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2387 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2388 goto failed;
2391 if (cp->addr.type == BDADDR_BREDR)
2392 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2393 &cp->addr.bdaddr);
2394 else
2395 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2397 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2398 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2399 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2400 goto failed;
2403 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2404 if (!cmd) {
2405 err = -ENOMEM;
2406 goto failed;
2409 dc.handle = cpu_to_le16(conn->handle);
2410 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2412 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2413 if (err < 0)
2414 mgmt_pending_remove(cmd);
2416 failed:
2417 hci_dev_unlock(hdev);
2418 return err;
2421 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2423 switch (link_type) {
2424 case LE_LINK:
2425 switch (addr_type) {
2426 case ADDR_LE_DEV_PUBLIC:
2427 return BDADDR_LE_PUBLIC;
2429 default:
2430 /* Fallback to LE Random address type */
2431 return BDADDR_LE_RANDOM;
2434 default:
2435 /* Fallback to BR/EDR type */
2436 return BDADDR_BREDR;
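/* Get Connections: counts the connections flagged as mgmt-connected,
 * allocates a response large enough for all of them and fills in one
 * mgmt_addr_info per ACL/LE link, skipping SCO/eSCO links.
 */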
2440 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2441 u16 data_len)
2443 struct mgmt_rp_get_connections *rp;
2444 struct hci_conn *c;
2445 size_t rp_len;
2446 int err;
2447 u16 i;
2449 BT_DBG("");
2451 hci_dev_lock(hdev);
2453 if (!hdev_is_powered(hdev)) {
2454 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2455 MGMT_STATUS_NOT_POWERED);
2456 goto unlock;
2459 i = 0;
2460 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2461 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2462 i++;
2465 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2466 rp = kmalloc(rp_len, GFP_KERNEL);
2467 if (!rp) {
2468 err = -ENOMEM;
2469 goto unlock;
2472 i = 0;
2473 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2474 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2475 continue;
2476 bacpy(&rp->addr[i].bdaddr, &c->dst);
2477 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2478 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2479 continue;
2480 i++;
2483 rp->conn_count = cpu_to_le16(i);
2485 /* Recalculate length in case of filtered SCO connections, etc */
2486 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2488 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2489 rp_len);
2491 kfree(rp);
2493 unlock:
2494 hci_dev_unlock(hdev);
2495 return err;
2498 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2499 struct mgmt_cp_pin_code_neg_reply *cp)
2501 struct pending_cmd *cmd;
2502 int err;
2504 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2505 sizeof(*cp));
2506 if (!cmd)
2507 return -ENOMEM;
2509 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2510 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2511 if (err < 0)
2512 mgmt_pending_remove(cmd);
2514 return err;
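/* PIN Code Reply: forwards the user supplied PIN to the controller. A
 * 16 byte PIN is required when the pending security level is high;
 * otherwise a negative reply is sent on the user's behalf and the
 * command fails with Invalid Parameters.
 */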
2517 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2518 u16 len)
2520 struct hci_conn *conn;
2521 struct mgmt_cp_pin_code_reply *cp = data;
2522 struct hci_cp_pin_code_reply reply;
2523 struct pending_cmd *cmd;
2524 int err;
2526 BT_DBG("");
2528 hci_dev_lock(hdev);
2530 if (!hdev_is_powered(hdev)) {
2531 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2532 MGMT_STATUS_NOT_POWERED);
2533 goto failed;
2536 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2537 if (!conn) {
2538 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2539 MGMT_STATUS_NOT_CONNECTED);
2540 goto failed;
2543 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2544 struct mgmt_cp_pin_code_neg_reply ncp;
2546 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2548 BT_ERR("PIN code is not 16 bytes long");
2550 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2551 if (err >= 0)
2552 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2553 MGMT_STATUS_INVALID_PARAMS);
2555 goto failed;
2558 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2559 if (!cmd) {
2560 err = -ENOMEM;
2561 goto failed;
2564 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2565 reply.pin_len = cp->pin_len;
2566 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2568 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2569 if (err < 0)
2570 mgmt_pending_remove(cmd);
2572 failed:
2573 hci_dev_unlock(hdev);
2574 return err;
2577 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2578 u16 len)
2580 struct mgmt_cp_set_io_capability *cp = data;
2582 BT_DBG("");
2584 hci_dev_lock(hdev);
2586 hdev->io_capability = cp->io_capability;
2588 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2589 hdev->io_capability);
2591 hci_dev_unlock(hdev);
2593 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL, 0);
2597 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2599 struct hci_dev *hdev = conn->hdev;
2600 struct pending_cmd *cmd;
2602 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2603 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2604 continue;
2606 if (cmd->user_data != conn)
2607 continue;
2609 return cmd;
2612 return NULL;
2615 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2617 struct mgmt_rp_pair_device rp;
2618 struct hci_conn *conn = cmd->user_data;
2620 bacpy(&rp.addr.bdaddr, &conn->dst);
2621 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2623 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2624 &rp, sizeof(rp));
2626 /* So we don't get further callbacks for this connection */
2627 conn->connect_cfm_cb = NULL;
2628 conn->security_cfm_cb = NULL;
2629 conn->disconn_cfm_cb = NULL;
2631 hci_conn_drop(conn);
2633 mgmt_pending_remove(cmd);
2636 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2638 struct pending_cmd *cmd;
2640 BT_DBG("status %u", status);
2642 cmd = find_pairing(conn);
2643 if (!cmd)
2644 BT_DBG("Unable to find a pending command");
2645 else
2646 pairing_complete(cmd, mgmt_status(status));
2649 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2651 struct pending_cmd *cmd;
2653 BT_DBG("status %u", status);
2655 if (!status)
2656 return;
2658 cmd = find_pairing(conn);
2659 if (!cmd)
2660 BT_DBG("Unable to find a pending command");
2661 else
2662 pairing_complete(cmd, mgmt_status(status));
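/* Pair Device: initiates an ACL or LE connection with dedicated bonding
 * and registers pairing_complete_cb (or le_connect_complete_cb for LE)
 * on the resulting hci_conn. The pending command is completed from
 * those callbacks once the security procedure finishes or fails.
 */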
2665 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2666 u16 len)
2668 struct mgmt_cp_pair_device *cp = data;
2669 struct mgmt_rp_pair_device rp;
2670 struct pending_cmd *cmd;
2671 u8 sec_level, auth_type;
2672 struct hci_conn *conn;
2673 int err;
2675 BT_DBG("");
2677 memset(&rp, 0, sizeof(rp));
2678 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2679 rp.addr.type = cp->addr.type;
2681 if (!bdaddr_type_is_valid(cp->addr.type))
2682 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2683 MGMT_STATUS_INVALID_PARAMS,
2684 &rp, sizeof(rp));
2686 hci_dev_lock(hdev);
2688 if (!hdev_is_powered(hdev)) {
2689 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2690 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2691 goto unlock;
2694 sec_level = BT_SECURITY_MEDIUM;
2695 if (cp->io_cap == 0x03)
2696 auth_type = HCI_AT_DEDICATED_BONDING;
2697 else
2698 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2700 if (cp->addr.type == BDADDR_BREDR)
2701 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2702 cp->addr.type, sec_level, auth_type);
2703 else
2704 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2705 cp->addr.type, sec_level, auth_type);
2707 if (IS_ERR(conn)) {
2708 int status;
2710 if (PTR_ERR(conn) == -EBUSY)
2711 status = MGMT_STATUS_BUSY;
2712 else
2713 status = MGMT_STATUS_CONNECT_FAILED;
2715 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2716 status, &rp,
2717 sizeof(rp));
2718 goto unlock;
2721 if (conn->connect_cfm_cb) {
2722 hci_conn_drop(conn);
2723 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2724 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2725 goto unlock;
2728 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2729 if (!cmd) {
2730 err = -ENOMEM;
2731 hci_conn_drop(conn);
2732 goto unlock;
2735 /* For LE, just connecting isn't a proof that the pairing finished */
2736 if (cp->addr.type == BDADDR_BREDR)
2737 conn->connect_cfm_cb = pairing_complete_cb;
2738 else
2739 conn->connect_cfm_cb = le_connect_complete_cb;
2741 conn->security_cfm_cb = pairing_complete_cb;
2742 conn->disconn_cfm_cb = pairing_complete_cb;
2743 conn->io_capability = cp->io_cap;
2744 cmd->user_data = conn;
2746 if (conn->state == BT_CONNECTED &&
2747 hci_conn_security(conn, sec_level, auth_type))
2748 pairing_complete(cmd, 0);
2750 err = 0;
2752 unlock:
2753 hci_dev_unlock(hdev);
2754 return err;
2757 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2758 u16 len)
2760 struct mgmt_addr_info *addr = data;
2761 struct pending_cmd *cmd;
2762 struct hci_conn *conn;
2763 int err;
2765 BT_DBG("");
2767 hci_dev_lock(hdev);
2769 if (!hdev_is_powered(hdev)) {
2770 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2771 MGMT_STATUS_NOT_POWERED);
2772 goto unlock;
2775 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2776 if (!cmd) {
2777 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2778 MGMT_STATUS_INVALID_PARAMS);
2779 goto unlock;
2782 conn = cmd->user_data;
2784 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2785 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2786 MGMT_STATUS_INVALID_PARAMS);
2787 goto unlock;
2790 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2792 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2793 addr, sizeof(*addr));
2794 unlock:
2795 hci_dev_unlock(hdev);
2796 return err;
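/* Common helper for the user confirmation and passkey (negative)
 * replies: LE pairing responses are routed through SMP via
 * smp_user_confirm_reply(), while BR/EDR responses are sent to the
 * controller as the corresponding HCI command.
 */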
2799 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2800 struct mgmt_addr_info *addr, u16 mgmt_op,
2801 u16 hci_op, __le32 passkey)
2803 struct pending_cmd *cmd;
2804 struct hci_conn *conn;
2805 int err;
2807 hci_dev_lock(hdev);
2809 if (!hdev_is_powered(hdev)) {
2810 err = cmd_complete(sk, hdev->id, mgmt_op,
2811 MGMT_STATUS_NOT_POWERED, addr,
2812 sizeof(*addr));
2813 goto done;
2816 if (addr->type == BDADDR_BREDR)
2817 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2818 else
2819 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2821 if (!conn) {
2822 err = cmd_complete(sk, hdev->id, mgmt_op,
2823 MGMT_STATUS_NOT_CONNECTED, addr,
2824 sizeof(*addr));
2825 goto done;
2828 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2829 /* Continue with pairing via SMP */
2830 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2832 if (!err)
2833 err = cmd_complete(sk, hdev->id, mgmt_op,
2834 MGMT_STATUS_SUCCESS, addr,
2835 sizeof(*addr));
2836 else
2837 err = cmd_complete(sk, hdev->id, mgmt_op,
2838 MGMT_STATUS_FAILED, addr,
2839 sizeof(*addr));
2841 goto done;
2844 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2845 if (!cmd) {
2846 err = -ENOMEM;
2847 goto done;
2850 /* Continue with pairing via HCI */
2851 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2852 struct hci_cp_user_passkey_reply cp;
2854 bacpy(&cp.bdaddr, &addr->bdaddr);
2855 cp.passkey = passkey;
2856 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2857 } else
2858 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2859 &addr->bdaddr);
2861 if (err < 0)
2862 mgmt_pending_remove(cmd);
2864 done:
2865 hci_dev_unlock(hdev);
2866 return err;
2869 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2870 void *data, u16 len)
2872 struct mgmt_cp_pin_code_neg_reply *cp = data;
2874 BT_DBG("");
2876 return user_pairing_resp(sk, hdev, &cp->addr,
2877 MGMT_OP_PIN_CODE_NEG_REPLY,
2878 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2881 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2882 u16 len)
2884 struct mgmt_cp_user_confirm_reply *cp = data;
2886 BT_DBG("");
2888 if (len != sizeof(*cp))
2889 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2890 MGMT_STATUS_INVALID_PARAMS);
2892 return user_pairing_resp(sk, hdev, &cp->addr,
2893 MGMT_OP_USER_CONFIRM_REPLY,
2894 HCI_OP_USER_CONFIRM_REPLY, 0);
2897 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2898 void *data, u16 len)
2900 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2902 BT_DBG("");
2904 return user_pairing_resp(sk, hdev, &cp->addr,
2905 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2906 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2909 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2910 u16 len)
2912 struct mgmt_cp_user_passkey_reply *cp = data;
2914 BT_DBG("");
2916 return user_pairing_resp(sk, hdev, &cp->addr,
2917 MGMT_OP_USER_PASSKEY_REPLY,
2918 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2921 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2922 void *data, u16 len)
2924 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2926 BT_DBG("");
2928 return user_pairing_resp(sk, hdev, &cp->addr,
2929 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2930 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2933 static void update_name(struct hci_request *req)
2935 struct hci_dev *hdev = req->hdev;
2936 struct hci_cp_write_local_name cp;
2938 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2940 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2943 static void set_name_complete(struct hci_dev *hdev, u8 status)
2945 struct mgmt_cp_set_local_name *cp;
2946 struct pending_cmd *cmd;
2948 BT_DBG("status 0x%02x", status);
2950 hci_dev_lock(hdev);
2952 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2953 if (!cmd)
2954 goto unlock;
2956 cp = cmd->param;
2958 if (status)
2959 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2960 mgmt_status(status));
2961 else
2962 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2963 cp, sizeof(*cp));
2965 mgmt_pending_remove(cmd);
2967 unlock:
2968 hci_dev_unlock(hdev);
2971 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2972 u16 len)
2974 struct mgmt_cp_set_local_name *cp = data;
2975 struct pending_cmd *cmd;
2976 struct hci_request req;
2977 int err;
2979 BT_DBG("");
2981 hci_dev_lock(hdev);
2983 /* If the old values are the same as the new ones just return a
2984 * direct command complete event.
2986 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2987 !memcmp(hdev->short_name, cp->short_name,
2988 sizeof(hdev->short_name))) {
2989 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2990 data, len);
2991 goto failed;
2994 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2996 if (!hdev_is_powered(hdev)) {
2997 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2999 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3000 data, len);
3001 if (err < 0)
3002 goto failed;
3004 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3005 sk);
3007 goto failed;
3010 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3011 if (!cmd) {
3012 err = -ENOMEM;
3013 goto failed;
3016 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3018 hci_req_init(&req, hdev);
3020 if (lmp_bredr_capable(hdev)) {
3021 update_name(&req);
3022 update_eir(&req);
3025 /* The name is stored in the scan response data and so
3026 * no need to update the advertising data here.
3028 if (lmp_le_capable(hdev))
3029 update_scan_rsp_data(&req);
3031 err = hci_req_run(&req, set_name_complete);
3032 if (err < 0)
3033 mgmt_pending_remove(cmd);
3035 failed:
3036 hci_dev_unlock(hdev);
3037 return err;
3040 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3041 void *data, u16 data_len)
3043 struct pending_cmd *cmd;
3044 int err;
3046 BT_DBG("%s", hdev->name);
3048 hci_dev_lock(hdev);
3050 if (!hdev_is_powered(hdev)) {
3051 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3052 MGMT_STATUS_NOT_POWERED);
3053 goto unlock;
3056 if (!lmp_ssp_capable(hdev)) {
3057 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3058 MGMT_STATUS_NOT_SUPPORTED);
3059 goto unlock;
3062 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3063 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3064 MGMT_STATUS_BUSY);
3065 goto unlock;
3068 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3069 if (!cmd) {
3070 err = -ENOMEM;
3071 goto unlock;
3074 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3075 if (err < 0)
3076 mgmt_pending_remove(cmd);
3078 unlock:
3079 hci_dev_unlock(hdev);
3080 return err;
3083 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3084 void *data, u16 len)
3086 struct mgmt_cp_add_remote_oob_data *cp = data;
3087 u8 status;
3088 int err;
3090 BT_DBG("%s", hdev->name);
3092 hci_dev_lock(hdev);
3094 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
3095 cp->randomizer);
3096 if (err < 0)
3097 status = MGMT_STATUS_FAILED;
3098 else
3099 status = MGMT_STATUS_SUCCESS;
3101 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3102 &cp->addr, sizeof(cp->addr));
3104 hci_dev_unlock(hdev);
3105 return err;
3108 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3109 void *data, u16 len)
3111 struct mgmt_cp_remove_remote_oob_data *cp = data;
3112 u8 status;
3113 int err;
3115 BT_DBG("%s", hdev->name);
3117 hci_dev_lock(hdev);
3119 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3120 if (err < 0)
3121 status = MGMT_STATUS_INVALID_PARAMS;
3122 else
3123 status = MGMT_STATUS_SUCCESS;
3125 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3126 status, &cp->addr, sizeof(cp->addr));
3128 hci_dev_unlock(hdev);
3129 return err;
3132 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3134 struct pending_cmd *cmd;
3135 u8 type;
3136 int err;
3138 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3140 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3141 if (!cmd)
3142 return -ENOENT;
3144 type = hdev->discovery.type;
3146 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3147 &type, sizeof(type));
3148 mgmt_pending_remove(cmd);
3150 return err;
3153 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3155 BT_DBG("status %d", status);
3157 if (status) {
3158 hci_dev_lock(hdev);
3159 mgmt_start_discovery_failed(hdev, status);
3160 hci_dev_unlock(hdev);
3161 return;
3164 hci_dev_lock(hdev);
3165 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3166 hci_dev_unlock(hdev);
3168 switch (hdev->discovery.type) {
3169 case DISCOV_TYPE_LE:
3170 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3171 DISCOV_LE_TIMEOUT);
3172 break;
3174 case DISCOV_TYPE_INTERLEAVED:
3175 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3176 DISCOV_INTERLEAVED_TIMEOUT);
3177 break;
3179 case DISCOV_TYPE_BREDR:
3180 break;
3182 default:
3183 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
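/* Start Discovery: after rejecting the request if the controller is
 * unpowered, already inquiring or scanning, or currently advertising,
 * this builds an HCI request for the chosen discovery type: an inquiry
 * using the GIAC for BR/EDR, or an active LE scan for the LE and
 * interleaved types.
 */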
3187 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3188 void *data, u16 len)
3190 struct mgmt_cp_start_discovery *cp = data;
3191 struct pending_cmd *cmd;
3192 struct hci_cp_le_set_scan_param param_cp;
3193 struct hci_cp_le_set_scan_enable enable_cp;
3194 struct hci_cp_inquiry inq_cp;
3195 struct hci_request req;
3196 /* General inquiry access code (GIAC) */
3197 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3198 u8 status;
3199 int err;
3201 BT_DBG("%s", hdev->name);
3203 hci_dev_lock(hdev);
3205 if (!hdev_is_powered(hdev)) {
3206 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3207 MGMT_STATUS_NOT_POWERED);
3208 goto failed;
3211 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3212 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3213 MGMT_STATUS_BUSY);
3214 goto failed;
3217 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3218 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3219 MGMT_STATUS_BUSY);
3220 goto failed;
3223 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3224 if (!cmd) {
3225 err = -ENOMEM;
3226 goto failed;
3229 hdev->discovery.type = cp->type;
3231 hci_req_init(&req, hdev);
3233 switch (hdev->discovery.type) {
3234 case DISCOV_TYPE_BREDR:
3235 status = mgmt_bredr_support(hdev);
3236 if (status) {
3237 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3238 status);
3239 mgmt_pending_remove(cmd);
3240 goto failed;
3243 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3244 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3245 MGMT_STATUS_BUSY);
3246 mgmt_pending_remove(cmd);
3247 goto failed;
3250 hci_inquiry_cache_flush(hdev);
3252 memset(&inq_cp, 0, sizeof(inq_cp));
3253 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3254 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3255 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3256 break;
3258 case DISCOV_TYPE_LE:
3259 case DISCOV_TYPE_INTERLEAVED:
3260 status = mgmt_le_support(hdev);
3261 if (status) {
3262 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3263 status);
3264 mgmt_pending_remove(cmd);
3265 goto failed;
3268 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3269 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3270 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3271 MGMT_STATUS_NOT_SUPPORTED);
3272 mgmt_pending_remove(cmd);
3273 goto failed;
3276 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3277 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3278 MGMT_STATUS_REJECTED);
3279 mgmt_pending_remove(cmd);
3280 goto failed;
3283 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
3284 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3285 MGMT_STATUS_BUSY);
3286 mgmt_pending_remove(cmd);
3287 goto failed;
3290 memset(&param_cp, 0, sizeof(param_cp));
3291 param_cp.type = LE_SCAN_ACTIVE;
3292 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3293 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3294 param_cp.own_address_type = hdev->own_addr_type;
3295 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3296 &param_cp);
3298 memset(&enable_cp, 0, sizeof(enable_cp));
3299 enable_cp.enable = LE_SCAN_ENABLE;
3300 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3301 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3302 &enable_cp);
3303 break;
3305 default:
3306 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3307 MGMT_STATUS_INVALID_PARAMS);
3308 mgmt_pending_remove(cmd);
3309 goto failed;
3312 err = hci_req_run(&req, start_discovery_complete);
3313 if (err < 0)
3314 mgmt_pending_remove(cmd);
3315 else
3316 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3318 failed:
3319 hci_dev_unlock(hdev);
3320 return err;
3323 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3325 struct pending_cmd *cmd;
3326 int err;
3328 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3329 if (!cmd)
3330 return -ENOENT;
3332 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3333 &hdev->discovery.type, sizeof(hdev->discovery.type));
3334 mgmt_pending_remove(cmd);
3336 return err;
3339 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3341 BT_DBG("status %d", status);
3343 hci_dev_lock(hdev);
3345 if (status) {
3346 mgmt_stop_discovery_failed(hdev, status);
3347 goto unlock;
3350 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3352 unlock:
3353 hci_dev_unlock(hdev);
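/* Stop Discovery: cancels whatever phase discovery is currently in,
 * either the ongoing inquiry or LE scan (DISCOVERY_FINDING) or a
 * pending remote name request (DISCOVERY_RESOLVING), and moves the
 * discovery state machine towards DISCOVERY_STOPPED.
 */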
3356 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3357 u16 len)
3359 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3360 struct pending_cmd *cmd;
3361 struct hci_cp_remote_name_req_cancel cp;
3362 struct inquiry_entry *e;
3363 struct hci_request req;
3364 struct hci_cp_le_set_scan_enable enable_cp;
3365 int err;
3367 BT_DBG("%s", hdev->name);
3369 hci_dev_lock(hdev);
3371 if (!hci_discovery_active(hdev)) {
3372 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3373 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3374 sizeof(mgmt_cp->type));
3375 goto unlock;
3378 if (hdev->discovery.type != mgmt_cp->type) {
3379 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3380 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3381 sizeof(mgmt_cp->type));
3382 goto unlock;
3385 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3386 if (!cmd) {
3387 err = -ENOMEM;
3388 goto unlock;
3391 hci_req_init(&req, hdev);
3393 switch (hdev->discovery.state) {
3394 case DISCOVERY_FINDING:
3395 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3396 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3397 } else {
3398 cancel_delayed_work(&hdev->le_scan_disable);
3400 memset(&enable_cp, 0, sizeof(enable_cp));
3401 enable_cp.enable = LE_SCAN_DISABLE;
3402 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
3403 sizeof(enable_cp), &enable_cp);
3406 break;
3408 case DISCOVERY_RESOLVING:
3409 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3410 NAME_PENDING);
3411 if (!e) {
3412 mgmt_pending_remove(cmd);
3413 err = cmd_complete(sk, hdev->id,
3414 MGMT_OP_STOP_DISCOVERY, 0,
3415 &mgmt_cp->type,
3416 sizeof(mgmt_cp->type));
3417 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3418 goto unlock;
3421 bacpy(&cp.bdaddr, &e->data.bdaddr);
3422 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3423 &cp);
3425 break;
3427 default:
3428 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3430 mgmt_pending_remove(cmd);
3431 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3432 MGMT_STATUS_FAILED, &mgmt_cp->type,
3433 sizeof(mgmt_cp->type));
3434 goto unlock;
3437 err = hci_req_run(&req, stop_discovery_complete);
3438 if (err < 0)
3439 mgmt_pending_remove(cmd);
3440 else
3441 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3443 unlock:
3444 hci_dev_unlock(hdev);
3445 return err;
3448 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3449 u16 len)
3451 struct mgmt_cp_confirm_name *cp = data;
3452 struct inquiry_entry *e;
3453 int err;
3455 BT_DBG("%s", hdev->name);
3457 hci_dev_lock(hdev);
3459 if (!hci_discovery_active(hdev)) {
3460 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3461 MGMT_STATUS_FAILED);
3462 goto failed;
3465 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3466 if (!e) {
3467 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3468 MGMT_STATUS_INVALID_PARAMS);
3469 goto failed;
3472 if (cp->name_known) {
3473 e->name_state = NAME_KNOWN;
3474 list_del(&e->list);
3475 } else {
3476 e->name_state = NAME_NEEDED;
3477 hci_inquiry_cache_update_resolve(hdev, e);
3480 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3481 sizeof(cp->addr));
3483 failed:
3484 hci_dev_unlock(hdev);
3485 return err;
3488 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3489 u16 len)
3491 struct mgmt_cp_block_device *cp = data;
3492 u8 status;
3493 int err;
3495 BT_DBG("%s", hdev->name);
3497 if (!bdaddr_type_is_valid(cp->addr.type))
3498 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3499 MGMT_STATUS_INVALID_PARAMS,
3500 &cp->addr, sizeof(cp->addr));
3502 hci_dev_lock(hdev);
3504 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3505 if (err < 0)
3506 status = MGMT_STATUS_FAILED;
3507 else
3508 status = MGMT_STATUS_SUCCESS;
3510 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3511 &cp->addr, sizeof(cp->addr));
3513 hci_dev_unlock(hdev);
3515 return err;
3518 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3519 u16 len)
3521 struct mgmt_cp_unblock_device *cp = data;
3522 u8 status;
3523 int err;
3525 BT_DBG("%s", hdev->name);
3527 if (!bdaddr_type_is_valid(cp->addr.type))
3528 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3529 MGMT_STATUS_INVALID_PARAMS,
3530 &cp->addr, sizeof(cp->addr));
3532 hci_dev_lock(hdev);
3534 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3535 if (err < 0)
3536 status = MGMT_STATUS_INVALID_PARAMS;
3537 else
3538 status = MGMT_STATUS_SUCCESS;
3540 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3541 &cp->addr, sizeof(cp->addr));
3543 hci_dev_unlock(hdev);
3545 return err;
3548 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3549 u16 len)
3551 struct mgmt_cp_set_device_id *cp = data;
3552 struct hci_request req;
3553 int err;
3554 __u16 source;
3556 BT_DBG("%s", hdev->name);
3558 source = __le16_to_cpu(cp->source);
3560 if (source > 0x0002)
3561 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3562 MGMT_STATUS_INVALID_PARAMS);
3564 hci_dev_lock(hdev);
3566 hdev->devid_source = source;
3567 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3568 hdev->devid_product = __le16_to_cpu(cp->product);
3569 hdev->devid_version = __le16_to_cpu(cp->version);
3571 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3573 hci_req_init(&req, hdev);
3574 update_eir(&req);
3575 hci_req_run(&req, NULL);
3577 hci_dev_unlock(hdev);
3579 return err;
3582 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3584 struct cmd_lookup match = { NULL, hdev };
3586 if (status) {
3587 u8 mgmt_err = mgmt_status(status);
3589 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3590 cmd_status_rsp, &mgmt_err);
3591 return;
3594 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3595 &match);
3597 new_settings(hdev, match.sk);
3599 if (match.sk)
3600 sock_put(match.sk);
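/* Set Advertising: toggles the HCI_ADVERTISING flag. When the change
 * cannot take immediate effect on the controller (powered off, value
 * unchanged, or an LE connection exists) only the flag and settings
 * response are updated; otherwise an enable/disable advertising request
 * is sent and completed in set_advertising_complete().
 */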
3603 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3604 u16 len)
3606 struct mgmt_mode *cp = data;
3607 struct pending_cmd *cmd;
3608 struct hci_request req;
3609 u8 val, enabled, status;
3610 int err;
3612 BT_DBG("request for %s", hdev->name);
3614 status = mgmt_le_support(hdev);
3615 if (status)
3616 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3617 status);
3619 if (cp->val != 0x00 && cp->val != 0x01)
3620 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3621 MGMT_STATUS_INVALID_PARAMS);
3623 hci_dev_lock(hdev);
3625 val = !!cp->val;
3626 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3628 /* The following conditions are ones which mean that we should
3629 * not do any HCI communication but directly send a mgmt
3630 * response to user space (after toggling the flag if
3631 * necessary).
3633 if (!hdev_is_powered(hdev) || val == enabled ||
3634 hci_conn_num(hdev, LE_LINK) > 0) {
3635 bool changed = false;
3637 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3638 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3639 changed = true;
3642 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3643 if (err < 0)
3644 goto unlock;
3646 if (changed)
3647 err = new_settings(hdev, sk);
3649 goto unlock;
3652 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3653 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3654 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3655 MGMT_STATUS_BUSY);
3656 goto unlock;
3659 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3660 if (!cmd) {
3661 err = -ENOMEM;
3662 goto unlock;
3665 hci_req_init(&req, hdev);
3667 if (val)
3668 enable_advertising(&req);
3669 else
3670 disable_advertising(&req);
3672 err = hci_req_run(&req, set_advertising_complete);
3673 if (err < 0)
3674 mgmt_pending_remove(cmd);
3676 unlock:
3677 hci_dev_unlock(hdev);
3678 return err;
3681 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3682 void *data, u16 len)
3684 struct mgmt_cp_set_static_address *cp = data;
3685 int err;
3687 BT_DBG("%s", hdev->name);
3689 if (!lmp_le_capable(hdev))
3690 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3691 MGMT_STATUS_NOT_SUPPORTED);
3693 if (hdev_is_powered(hdev))
3694 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3695 MGMT_STATUS_REJECTED);
3697 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3698 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3699 return cmd_status(sk, hdev->id,
3700 MGMT_OP_SET_STATIC_ADDRESS,
3701 MGMT_STATUS_INVALID_PARAMS);
3703 /* Two most significant bits shall be set */
3704 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3705 return cmd_status(sk, hdev->id,
3706 MGMT_OP_SET_STATIC_ADDRESS,
3707 MGMT_STATUS_INVALID_PARAMS);
3710 hci_dev_lock(hdev);
3712 bacpy(&hdev->static_addr, &cp->bdaddr);
3714 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3716 hci_dev_unlock(hdev);
3718 return err;
3721 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3722 void *data, u16 len)
3724 struct mgmt_cp_set_scan_params *cp = data;
3725 __u16 interval, window;
3726 int err;
3728 BT_DBG("%s", hdev->name);
3730 if (!lmp_le_capable(hdev))
3731 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3732 MGMT_STATUS_NOT_SUPPORTED);
3734 interval = __le16_to_cpu(cp->interval);
3736 if (interval < 0x0004 || interval > 0x4000)
3737 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3738 MGMT_STATUS_INVALID_PARAMS);
3740 window = __le16_to_cpu(cp->window);
3742 if (window < 0x0004 || window > 0x4000)
3743 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3744 MGMT_STATUS_INVALID_PARAMS);
3746 if (window > interval)
3747 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3748 MGMT_STATUS_INVALID_PARAMS);
3750 hci_dev_lock(hdev);
3752 hdev->le_scan_interval = interval;
3753 hdev->le_scan_window = window;
3755 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3757 hci_dev_unlock(hdev);
3759 return err;
3762 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3764 struct pending_cmd *cmd;
3766 BT_DBG("status 0x%02x", status);
3768 hci_dev_lock(hdev);
3770 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3771 if (!cmd)
3772 goto unlock;
3774 if (status) {
3775 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3776 mgmt_status(status));
3777 } else {
3778 struct mgmt_mode *cp = cmd->param;
3780 if (cp->val)
3781 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3782 else
3783 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3785 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3786 new_settings(hdev, cmd->sk);
3789 mgmt_pending_remove(cmd);
3791 unlock:
3792 hci_dev_unlock(hdev);
3795 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3796 void *data, u16 len)
3798 struct mgmt_mode *cp = data;
3799 struct pending_cmd *cmd;
3800 struct hci_request req;
3801 int err;
3803 BT_DBG("%s", hdev->name);
3805 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
3806 hdev->hci_ver < BLUETOOTH_VER_1_2)
3807 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3808 MGMT_STATUS_NOT_SUPPORTED);
3810 if (cp->val != 0x00 && cp->val != 0x01)
3811 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3812 MGMT_STATUS_INVALID_PARAMS);
3814 if (!hdev_is_powered(hdev))
3815 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3816 MGMT_STATUS_NOT_POWERED);
3818 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3819 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3820 MGMT_STATUS_REJECTED);
3822 hci_dev_lock(hdev);
3824 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3825 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3826 MGMT_STATUS_BUSY);
3827 goto unlock;
3830 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3831 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3832 hdev);
3833 goto unlock;
3836 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3837 data, len);
3838 if (!cmd) {
3839 err = -ENOMEM;
3840 goto unlock;
3843 hci_req_init(&req, hdev);
3845 write_fast_connectable(&req, cp->val);
3847 err = hci_req_run(&req, fast_connectable_complete);
3848 if (err < 0) {
3849 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3850 MGMT_STATUS_FAILED);
3851 mgmt_pending_remove(cmd);
3854 unlock:
3855 hci_dev_unlock(hdev);
3857 return err;
3860 static void set_bredr_scan(struct hci_request *req)
3862 struct hci_dev *hdev = req->hdev;
3863 u8 scan = 0;
3865 /* Ensure that fast connectable is disabled. This function will
3866 * not do anything if the page scan parameters are already what
3867 * they should be.
3869 write_fast_connectable(req, false);
3871 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3872 scan |= SCAN_PAGE;
3873 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3874 scan |= SCAN_INQUIRY;
3876 if (scan)
3877 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3880 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3882 struct pending_cmd *cmd;
3884 BT_DBG("status 0x%02x", status);
3886 hci_dev_lock(hdev);
3888 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3889 if (!cmd)
3890 goto unlock;
3892 if (status) {
3893 u8 mgmt_err = mgmt_status(status);
3895 /* We need to restore the flag if related HCI commands
3896 * failed.
3898 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3900 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3901 } else {
3902 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3903 new_settings(hdev, cmd->sk);
3906 mgmt_pending_remove(cmd);
3908 unlock:
3909 hci_dev_unlock(hdev);
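/* Set BR/EDR: enables BR/EDR support on a dual-mode controller.
 * Disabling is only allowed while powered off; when powered on,
 * enabling flips HCI_BREDR_ENABLED, restores page/inquiry scan as
 * needed and refreshes the advertising data flags via update_adv_data().
 */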
3912 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3914 struct mgmt_mode *cp = data;
3915 struct pending_cmd *cmd;
3916 struct hci_request req;
3917 int err;
3919 BT_DBG("request for %s", hdev->name);
3921 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
3922 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3923 MGMT_STATUS_NOT_SUPPORTED);
3925 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3926 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3927 MGMT_STATUS_REJECTED);
3929 if (cp->val != 0x00 && cp->val != 0x01)
3930 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3931 MGMT_STATUS_INVALID_PARAMS);
3933 hci_dev_lock(hdev);
3935 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3936 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3937 goto unlock;
3940 if (!hdev_is_powered(hdev)) {
3941 if (!cp->val) {
3942 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
3943 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
3944 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3945 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3946 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
3949 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3951 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3952 if (err < 0)
3953 goto unlock;
3955 err = new_settings(hdev, sk);
3956 goto unlock;
3959 /* Reject disabling when powered on */
3960 if (!cp->val) {
3961 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3962 MGMT_STATUS_REJECTED);
3963 goto unlock;
3966 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
3967 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3968 MGMT_STATUS_BUSY);
3969 goto unlock;
3972 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
3973 if (!cmd) {
3974 err = -ENOMEM;
3975 goto unlock;
3978 /* We need to flip the bit already here so that update_adv_data
3979 * generates the correct flags.
3981 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3983 hci_req_init(&req, hdev);
3985 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3986 set_bredr_scan(&req);
3988 /* Since only the advertising data flags will change, there
3989 * is no need to update the scan response data.
3991 update_adv_data(&req);
3993 err = hci_req_run(&req, set_bredr_complete);
3994 if (err < 0)
3995 mgmt_pending_remove(cmd);
3997 unlock:
3998 hci_dev_unlock(hdev);
3999 return err;
4002 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4004 if (key->authenticated != 0x00 && key->authenticated != 0x01)
4005 return false;
4006 if (key->master != 0x00 && key->master != 0x01)
4007 return false;
4008 if (!bdaddr_type_is_le(key->addr.type))
4009 return false;
4010 return true;
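/* Load Long Term Keys: the LE counterpart of load_link_keys. After
 * validating the payload length and each mgmt_ltk_info entry, the
 * existing LTKs are cleared and the supplied keys added as master or
 * slave keys depending on key->master.
 */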
4013 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4014 void *cp_data, u16 len)
4016 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4017 u16 key_count, expected_len;
4018 int i, err;
4020 BT_DBG("request for %s", hdev->name);
4022 if (!lmp_le_capable(hdev))
4023 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4024 MGMT_STATUS_NOT_SUPPORTED);
4026 key_count = __le16_to_cpu(cp->key_count);
4028 expected_len = sizeof(*cp) + key_count *
4029 sizeof(struct mgmt_ltk_info);
4030 if (expected_len != len) {
4031 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4032 expected_len, len);
4033 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4034 MGMT_STATUS_INVALID_PARAMS);
4037 BT_DBG("%s key_count %u", hdev->name, key_count);
4039 for (i = 0; i < key_count; i++) {
4040 struct mgmt_ltk_info *key = &cp->keys[i];
4042 if (!ltk_is_valid(key))
4043 return cmd_status(sk, hdev->id,
4044 MGMT_OP_LOAD_LONG_TERM_KEYS,
4045 MGMT_STATUS_INVALID_PARAMS);
4048 hci_dev_lock(hdev);
4050 hci_smp_ltks_clear(hdev);
4052 for (i = 0; i < key_count; i++) {
4053 struct mgmt_ltk_info *key = &cp->keys[i];
4054 u8 type, addr_type;
4056 if (key->addr.type == BDADDR_LE_PUBLIC)
4057 addr_type = ADDR_LE_DEV_PUBLIC;
4058 else
4059 addr_type = ADDR_LE_DEV_RANDOM;
4061 if (key->master)
4062 type = HCI_SMP_LTK;
4063 else
4064 type = HCI_SMP_LTK_SLAVE;
4066 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
4067 type, 0, key->authenticated, key->val,
4068 key->enc_size, key->ediv, key->rand);
4071 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4072 NULL, 0);
4074 hci_dev_unlock(hdev);
4076 return err;
4079 static const struct mgmt_handler {
4080 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4081 u16 data_len);
4082 bool var_len;
4083 size_t data_len;
4084 } mgmt_handlers[] = {
4085 { NULL }, /* 0x0000 (no command) */
4086 { read_version, false, MGMT_READ_VERSION_SIZE },
4087 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4088 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4089 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4090 { set_powered, false, MGMT_SETTING_SIZE },
4091 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4092 { set_connectable, false, MGMT_SETTING_SIZE },
4093 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4094 { set_pairable, false, MGMT_SETTING_SIZE },
4095 { set_link_security, false, MGMT_SETTING_SIZE },
4096 { set_ssp, false, MGMT_SETTING_SIZE },
4097 { set_hs, false, MGMT_SETTING_SIZE },
4098 { set_le, false, MGMT_SETTING_SIZE },
4099 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4100 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4101 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4102 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4103 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4104 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4105 { disconnect, false, MGMT_DISCONNECT_SIZE },
4106 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4107 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4108 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4109 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4110 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4111 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4112 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4113 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4114 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4115 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4116 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4117 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4118 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4119 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4120 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4121 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4122 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4123 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4124 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4125 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4126 { set_advertising, false, MGMT_SETTING_SIZE },
4127 { set_bredr, false, MGMT_SETTING_SIZE },
4128 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4129 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
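/* Entry point for the management control channel: copies the message
 * from user space, validates the header, index and parameter length
 * against the mgmt_handlers table and then dispatches to the matching
 * handler, returning the number of bytes consumed on success.
 */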
4133 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4135 void *buf;
4136 u8 *cp;
4137 struct mgmt_hdr *hdr;
4138 u16 opcode, index, len;
4139 struct hci_dev *hdev = NULL;
4140 const struct mgmt_handler *handler;
4141 int err;
4143 BT_DBG("got %zu bytes", msglen);
4145 if (msglen < sizeof(*hdr))
4146 return -EINVAL;
4148 buf = kmalloc(msglen, GFP_KERNEL);
4149 if (!buf)
4150 return -ENOMEM;
4152 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4153 err = -EFAULT;
4154 goto done;
4157 hdr = buf;
4158 opcode = __le16_to_cpu(hdr->opcode);
4159 index = __le16_to_cpu(hdr->index);
4160 len = __le16_to_cpu(hdr->len);
4162 if (len != msglen - sizeof(*hdr)) {
4163 err = -EINVAL;
4164 goto done;
4167 if (index != MGMT_INDEX_NONE) {
4168 hdev = hci_dev_get(index);
4169 if (!hdev) {
4170 err = cmd_status(sk, index, opcode,
4171 MGMT_STATUS_INVALID_INDEX);
4172 goto done;
4175 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4176 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4177 err = cmd_status(sk, index, opcode,
4178 MGMT_STATUS_INVALID_INDEX);
4179 goto done;
4183 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4184 mgmt_handlers[opcode].func == NULL) {
4185 BT_DBG("Unknown op %u", opcode);
4186 err = cmd_status(sk, index, opcode,
4187 MGMT_STATUS_UNKNOWN_COMMAND);
4188 goto done;
4191 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4192 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4193 err = cmd_status(sk, index, opcode,
4194 MGMT_STATUS_INVALID_INDEX);
4195 goto done;
4198 handler = &mgmt_handlers[opcode];
4200 if ((handler->var_len && len < handler->data_len) ||
4201 (!handler->var_len && len != handler->data_len)) {
4202 err = cmd_status(sk, index, opcode,
4203 MGMT_STATUS_INVALID_PARAMS);
4204 goto done;
4207 if (hdev)
4208 mgmt_init_hdev(sk, hdev);
4210 cp = buf + sizeof(*hdr);
4212 err = handler->func(sk, hdev, cp, len);
4213 if (err < 0)
4214 goto done;
4216 err = msglen;
4218 done:
4219 if (hdev)
4220 hci_dev_put(hdev);
4222 kfree(buf);
4223 return err;
4226 void mgmt_index_added(struct hci_dev *hdev)
4228 if (hdev->dev_type != HCI_BREDR)
4229 return;
4231 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4234 void mgmt_index_removed(struct hci_dev *hdev)
4236 u8 status = MGMT_STATUS_INVALID_INDEX;
4238 if (hdev->dev_type != HCI_BREDR)
4239 return;
4241 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4243 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4246 static void powered_complete(struct hci_dev *hdev, u8 status)
4248 struct cmd_lookup match = { NULL, hdev };
4250 BT_DBG("status 0x%02x", status);
4252 hci_dev_lock(hdev);
4254 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4256 new_settings(hdev, match.sk);
4258 hci_dev_unlock(hdev);
4260 if (match.sk)
4261 sock_put(match.sk);
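/* Sends the HCI commands needed to bring a newly powered controller in
 * line with the current mgmt settings: SSP mode, LE host support, the
 * static random address, advertising and scan response data, link
 * security, page/inquiry scan, class of device, local name and EIR.
 */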
4264 static int powered_update_hci(struct hci_dev *hdev)
4266 struct hci_request req;
4267 u8 link_sec;
4269 hci_req_init(&req, hdev);
4271 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4272 !lmp_host_ssp_capable(hdev)) {
4273 u8 ssp = 1;
4275 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4278 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4279 lmp_bredr_capable(hdev)) {
4280 struct hci_cp_write_le_host_supported cp;
4282 cp.le = 1;
4283 cp.simul = lmp_le_br_capable(hdev);
4285 /* Check first if we already have the right
4286 * host state (host features set)
4288 if (cp.le != lmp_host_le_capable(hdev) ||
4289 cp.simul != lmp_host_le_br_capable(hdev))
4290 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4291 sizeof(cp), &cp);
4294 if (lmp_le_capable(hdev)) {
4295 /* Set random address to static address if configured */
4296 if (bacmp(&hdev->static_addr, BDADDR_ANY))
4297 hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4298 &hdev->static_addr);
4300 /* Make sure the controller has a good default for
4301 * advertising data. This also applies to the case
4302 * where BR/EDR was toggled during the AUTO_OFF phase.
4304 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4305 update_adv_data(&req);
4306 update_scan_rsp_data(&req);
4309 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4310 enable_advertising(&req);
4313 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4314 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4315 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4316 sizeof(link_sec), &link_sec);
4318 if (lmp_bredr_capable(hdev)) {
4319 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4320 set_bredr_scan(&req);
4321 update_class(&req);
4322 update_name(&req);
4323 update_eir(&req);
4326 return hci_req_run(&req, powered_complete);
4329 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4331 struct cmd_lookup match = { NULL, hdev };
4332 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4333 u8 zero_cod[] = { 0, 0, 0 };
4334 int err;
4336 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4337 return 0;
4339 if (powered) {
4340 if (powered_update_hci(hdev) == 0)
4341 return 0;
4343 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4344 &match);
4345 goto new_settings;
4348 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4349 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4351 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4352 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4353 zero_cod, sizeof(zero_cod), NULL);
4355 new_settings:
4356 err = new_settings(hdev, match.sk);
4358 if (match.sk)
4359 sock_put(match.sk);
4361 return err;
4364 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4366 struct pending_cmd *cmd;
4367 u8 status;
4369 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4370 if (!cmd)
4371 return;
4373 if (err == -ERFKILL)
4374 status = MGMT_STATUS_RFKILLED;
4375 else
4376 status = MGMT_STATUS_FAILED;
4378 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4380 mgmt_pending_remove(cmd);
4383 void mgmt_discoverable_timeout(struct hci_dev *hdev)
4385 struct hci_request req;
4387 hci_dev_lock(hdev);
4389 /* When discoverable timeout triggers, then just make sure
4390 * the limited discoverable flag is cleared. Even in the case
4391 * of a timeout triggered from general discoverable, it is
4392 * safe to unconditionally clear the flag.
4394 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4395 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4397 hci_req_init(&req, hdev);
4398 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4399 u8 scan = SCAN_PAGE;
4400 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
4401 sizeof(scan), &scan);
4403 update_class(&req);
4404 update_adv_data(&req);
4405 hci_req_run(&req, NULL);
4407 hdev->discov_timeout = 0;
4409 new_settings(hdev, NULL);
4411 hci_dev_unlock(hdev);
4414 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4416 bool changed;
4418 /* Nothing needed here if there's a pending command since that
4419 * command's request completion callback takes care of everything
4420 * necessary.
4422 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4423 return;
4425 if (discoverable) {
4426 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4427 } else {
4428 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4429 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4432 if (changed) {
4433 struct hci_request req;
4435 /* In case this change in discoverable was triggered by
4436 * a disabling of connectable there could be a need to
4437 * update the advertising flags.
4439 hci_req_init(&req, hdev);
4440 update_adv_data(&req);
4441 hci_req_run(&req, NULL);
4443 new_settings(hdev, NULL);
4447 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4449 bool changed;
4451 /* Nothing needed here if there's a pending command since that
4452 * command's request completion callback takes care of everything
4453 * necessary.
4455 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4456 return;
4458 if (connectable)
4459 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4460 else
4461 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4463 if (changed)
4464 new_settings(hdev, NULL);
4467 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4469 u8 mgmt_err = mgmt_status(status);
4471 if (scan & SCAN_PAGE)
4472 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4473 cmd_status_rsp, &mgmt_err);
4475 if (scan & SCAN_INQUIRY)
4476 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4477 cmd_status_rsp, &mgmt_err);
4480 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4481 bool persistent)
4483 struct mgmt_ev_new_link_key ev;
4485 memset(&ev, 0, sizeof(ev));
4487 ev.store_hint = persistent;
4488 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4489 ev.key.addr.type = BDADDR_BREDR;
4490 ev.key.type = key->type;
4491 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4492 ev.key.pin_len = key->pin_len;
4494 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4497 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4499 struct mgmt_ev_new_long_term_key ev;
4501 memset(&ev, 0, sizeof(ev));
4503 ev.store_hint = persistent;
4504 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4505 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4506 ev.key.authenticated = key->authenticated;
4507 ev.key.enc_size = key->enc_size;
4508 ev.key.ediv = key->ediv;
4510 if (key->type == HCI_SMP_LTK)
4511 ev.key.master = 1;
4513 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4514 memcpy(ev.key.val, key->val, sizeof(key->val));
4516 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
4519 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4520 u8 data_len)
4522 eir[eir_len++] = sizeof(type) + data_len;
4523 eir[eir_len++] = type;
4524 memcpy(&eir[eir_len], data, data_len);
4525 eir_len += data_len;
4527 return eir_len;
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}

static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
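
/* Emit a Device Disconnected event.  Any pending Disconnect command is
 * completed first via disconnect_rsp(), and its socket is used to skip
 * the originator when broadcasting the event.  Pending Unpair Device
 * commands are completed afterwards.
 */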
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, __le32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = value;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
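
/* Common completion handler for the four user confirm/passkey reply
 * variants below: look up the pending command for the given opcode,
 * complete it with the translated status and the remote address, and
 * drop the pending entry.
 */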
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
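
/* Handle completion of an SSP enable/disable request.  On failure the
 * flag changes are rolled back and pending Set SSP commands get a
 * status response; on success the setting is updated, pending commands
 * are completed, and the EIR data is refreshed (or cleared when SSP
 * ends up disabled).
 */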
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}

static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering on the
		 * HCI dev, don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
					     u8 *randomizer, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;

		memcpy(rp.hash, hash, sizeof(rp.hash));
		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));

		cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			     0, &rp, sizeof(rp));
	}

	mgmt_pending_remove(cmd);
}
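
/* Emit a Device Found event for an inquiry/scan result.  Events are only
 * sent while discovery is active, and the size check reserves five extra
 * bytes so a class of device field can still be appended to the EIR data
 * when the remote did not include one itself.
 */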
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		       u8 ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_blocked ev;

	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_unblocked ev;

	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);

	/* Clear the advertising mgmt setting if we failed to re-enable it */
	if (status) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}
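
/* Try to re-enable LE advertising.  Nothing is done while LE connections
 * are still up or when the advertising setting is not enabled; if
 * queueing the HCI request fails, the setting is cleared and user space
 * is notified through a New Settings event.
 */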
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails, we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}