/* net/bluetooth/hci_conn.c — from linux-2.6 tree, blob 7a6f56b2f49dfe5904f7f78f6ad46353e9ab1011 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI connection handling. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
48 static void hci_le_connect(struct hci_conn *conn)
50 struct hci_dev *hdev = conn->hdev;
51 struct hci_cp_le_create_conn cp;
53 conn->state = BT_CONNECT;
54 conn->out = 1;
55 conn->link_mode |= HCI_LM_MASTER;
57 memset(&cp, 0, sizeof(cp));
58 cp.scan_interval = cpu_to_le16(0x0004);
59 cp.scan_window = cpu_to_le16(0x0004);
60 bacpy(&cp.peer_addr, &conn->dst);
61 cp.conn_interval_min = cpu_to_le16(0x0008);
62 cp.conn_interval_max = cpu_to_le16(0x0100);
63 cp.supervision_timeout = cpu_to_le16(0x0064);
64 cp.min_ce_len = cpu_to_le16(0x0001);
65 cp.max_ce_len = cpu_to_le16(0x0001);
67 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
70 static void hci_le_connect_cancel(struct hci_conn *conn)
72 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
75 void hci_acl_connect(struct hci_conn *conn)
77 struct hci_dev *hdev = conn->hdev;
78 struct inquiry_entry *ie;
79 struct hci_cp_create_conn cp;
81 BT_DBG("%p", conn);
83 conn->state = BT_CONNECT;
84 conn->out = 1;
86 conn->link_mode = HCI_LM_MASTER;
88 conn->attempt++;
90 conn->link_policy = hdev->link_policy;
92 memset(&cp, 0, sizeof(cp));
93 bacpy(&cp.bdaddr, &conn->dst);
94 cp.pscan_rep_mode = 0x02;
96 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
97 if (ie) {
98 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
99 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
100 cp.pscan_mode = ie->data.pscan_mode;
101 cp.clock_offset = ie->data.clock_offset |
102 cpu_to_le16(0x8000);
105 memcpy(conn->dev_class, ie->data.dev_class, 3);
106 conn->ssp_mode = ie->data.ssp_mode;
109 cp.pkt_type = cpu_to_le16(conn->pkt_type);
110 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
111 cp.role_switch = 0x01;
112 else
113 cp.role_switch = 0x00;
115 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
118 static void hci_acl_connect_cancel(struct hci_conn *conn)
120 struct hci_cp_create_conn_cancel cp;
122 BT_DBG("%p", conn);
124 if (conn->hdev->hci_ver < 2)
125 return;
127 bacpy(&cp.bdaddr, &conn->dst);
128 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
131 void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
133 struct hci_cp_disconnect cp;
135 BT_DBG("%p", conn);
137 conn->state = BT_DISCONN;
139 cp.handle = cpu_to_le16(conn->handle);
140 cp.reason = reason;
141 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
144 void hci_add_sco(struct hci_conn *conn, __u16 handle)
146 struct hci_dev *hdev = conn->hdev;
147 struct hci_cp_add_sco cp;
149 BT_DBG("%p", conn);
151 conn->state = BT_CONNECT;
152 conn->out = 1;
154 conn->attempt++;
156 cp.handle = cpu_to_le16(handle);
157 cp.pkt_type = cpu_to_le16(conn->pkt_type);
159 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
162 void hci_setup_sync(struct hci_conn *conn, __u16 handle)
164 struct hci_dev *hdev = conn->hdev;
165 struct hci_cp_setup_sync_conn cp;
167 BT_DBG("%p", conn);
169 conn->state = BT_CONNECT;
170 conn->out = 1;
172 conn->attempt++;
174 cp.handle = cpu_to_le16(handle);
175 cp.pkt_type = cpu_to_le16(conn->pkt_type);
177 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
178 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
179 cp.max_latency = cpu_to_le16(0xffff);
180 cp.voice_setting = cpu_to_le16(hdev->voice_setting);
181 cp.retrans_effort = 0xff;
183 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
186 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
187 u16 latency, u16 to_multiplier)
189 struct hci_cp_le_conn_update cp;
190 struct hci_dev *hdev = conn->hdev;
192 memset(&cp, 0, sizeof(cp));
194 cp.handle = cpu_to_le16(conn->handle);
195 cp.conn_interval_min = cpu_to_le16(min);
196 cp.conn_interval_max = cpu_to_le16(max);
197 cp.conn_latency = cpu_to_le16(latency);
198 cp.supervision_timeout = cpu_to_le16(to_multiplier);
199 cp.min_ce_len = cpu_to_le16(0x0001);
200 cp.max_ce_len = cpu_to_le16(0x0001);
202 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
204 EXPORT_SYMBOL(hci_le_conn_update);
206 /* Device _must_ be locked */
207 void hci_sco_setup(struct hci_conn *conn, __u8 status)
209 struct hci_conn *sco = conn->link;
211 BT_DBG("%p", conn);
213 if (!sco)
214 return;
216 if (!status) {
217 if (lmp_esco_capable(conn->hdev))
218 hci_setup_sync(sco, conn->handle);
219 else
220 hci_add_sco(sco, conn->handle);
221 } else {
222 hci_proto_connect_cfm(sco, status);
223 hci_conn_del(sco);
227 static void hci_conn_timeout(unsigned long arg)
229 struct hci_conn *conn = (void *) arg;
230 struct hci_dev *hdev = conn->hdev;
231 __u8 reason;
233 BT_DBG("conn %p state %d", conn, conn->state);
235 if (atomic_read(&conn->refcnt))
236 return;
238 hci_dev_lock(hdev);
240 switch (conn->state) {
241 case BT_CONNECT:
242 case BT_CONNECT2:
243 if (conn->out) {
244 if (conn->type == ACL_LINK)
245 hci_acl_connect_cancel(conn);
246 else if (conn->type == LE_LINK)
247 hci_le_connect_cancel(conn);
249 break;
250 case BT_CONFIG:
251 case BT_CONNECTED:
252 reason = hci_proto_disconn_ind(conn);
253 hci_acl_disconn(conn, reason);
254 break;
255 default:
256 conn->state = BT_CLOSED;
257 break;
260 hci_dev_unlock(hdev);
263 static void hci_conn_idle(unsigned long arg)
265 struct hci_conn *conn = (void *) arg;
267 BT_DBG("conn %p mode %d", conn, conn->mode);
269 hci_conn_enter_sniff_mode(conn);
272 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
274 struct hci_conn *conn;
276 BT_DBG("%s dst %s", hdev->name, batostr(dst));
278 conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
279 if (!conn)
280 return NULL;
282 bacpy(&conn->dst, dst);
283 conn->hdev = hdev;
284 conn->type = type;
285 conn->mode = HCI_CM_ACTIVE;
286 conn->state = BT_OPEN;
287 conn->auth_type = HCI_AT_GENERAL_BONDING;
288 conn->io_capability = hdev->io_capability;
289 conn->remote_auth = 0xff;
291 conn->power_save = 1;
292 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
294 switch (type) {
295 case ACL_LINK:
296 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
297 break;
298 case SCO_LINK:
299 if (lmp_esco_capable(hdev))
300 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
301 (hdev->esco_type & EDR_ESCO_MASK);
302 else
303 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
304 break;
305 case ESCO_LINK:
306 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
307 break;
310 skb_queue_head_init(&conn->data_q);
312 setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
313 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
315 atomic_set(&conn->refcnt, 0);
317 hci_dev_hold(hdev);
319 tasklet_disable(&hdev->tx_task);
321 hci_conn_hash_add(hdev, conn);
322 if (hdev->notify)
323 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
325 atomic_set(&conn->devref, 0);
327 hci_conn_init_sysfs(conn);
329 tasklet_enable(&hdev->tx_task);
331 return conn;
334 int hci_conn_del(struct hci_conn *conn)
336 struct hci_dev *hdev = conn->hdev;
338 BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
340 del_timer(&conn->idle_timer);
342 del_timer(&conn->disc_timer);
344 if (conn->type == ACL_LINK) {
345 struct hci_conn *sco = conn->link;
346 if (sco)
347 sco->link = NULL;
349 /* Unacked frames */
350 hdev->acl_cnt += conn->sent;
351 } else if (conn->type == LE_LINK) {
352 if (hdev->le_pkts)
353 hdev->le_cnt += conn->sent;
354 else
355 hdev->acl_cnt += conn->sent;
356 } else {
357 struct hci_conn *acl = conn->link;
358 if (acl) {
359 acl->link = NULL;
360 hci_conn_put(acl);
364 tasklet_disable(&hdev->tx_task);
366 hci_conn_hash_del(hdev, conn);
367 if (hdev->notify)
368 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
370 tasklet_enable(&hdev->tx_task);
372 skb_queue_purge(&conn->data_q);
374 hci_conn_put_device(conn);
376 hci_dev_put(hdev);
378 return 0;
381 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
383 int use_src = bacmp(src, BDADDR_ANY);
384 struct hci_dev *hdev = NULL;
385 struct list_head *p;
387 BT_DBG("%s -> %s", batostr(src), batostr(dst));
389 read_lock_bh(&hci_dev_list_lock);
391 list_for_each(p, &hci_dev_list) {
392 struct hci_dev *d = list_entry(p, struct hci_dev, list);
394 if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
395 continue;
397 /* Simple routing:
398 * No source address - find interface with bdaddr != dst
399 * Source address - find interface with bdaddr == src
402 if (use_src) {
403 if (!bacmp(&d->bdaddr, src)) {
404 hdev = d; break;
406 } else {
407 if (bacmp(&d->bdaddr, dst)) {
408 hdev = d; break;
413 if (hdev)
414 hdev = hci_dev_hold(hdev);
416 read_unlock_bh(&hci_dev_list_lock);
417 return hdev;
419 EXPORT_SYMBOL(hci_get_route);
421 /* Create SCO, ACL or LE connection.
422 * Device _must_ be locked */
423 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
425 struct hci_conn *acl;
426 struct hci_conn *sco;
427 struct hci_conn *le;
429 BT_DBG("%s dst %s", hdev->name, batostr(dst));
431 if (type == LE_LINK) {
432 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
433 if (le)
434 return ERR_PTR(-EBUSY);
435 le = hci_conn_add(hdev, LE_LINK, dst);
436 if (!le)
437 return ERR_PTR(-ENOMEM);
438 if (le->state == BT_OPEN)
439 hci_le_connect(le);
441 hci_conn_hold(le);
443 return le;
446 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
447 if (!acl) {
448 acl = hci_conn_add(hdev, ACL_LINK, dst);
449 if (!acl)
450 return NULL;
453 hci_conn_hold(acl);
455 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
456 acl->sec_level = BT_SECURITY_LOW;
457 acl->pending_sec_level = sec_level;
458 acl->auth_type = auth_type;
459 hci_acl_connect(acl);
462 if (type == ACL_LINK)
463 return acl;
465 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
466 if (!sco) {
467 sco = hci_conn_add(hdev, type, dst);
468 if (!sco) {
469 hci_conn_put(acl);
470 return NULL;
474 acl->link = sco;
475 sco->link = acl;
477 hci_conn_hold(sco);
479 if (acl->state == BT_CONNECTED &&
480 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
481 acl->power_save = 1;
482 hci_conn_enter_active_mode(acl);
484 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
485 /* defer SCO setup until mode change completed */
486 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
487 return sco;
490 hci_sco_setup(acl, 0x00);
493 return sco;
495 EXPORT_SYMBOL(hci_connect);
497 /* Check link security requirement */
498 int hci_conn_check_link_mode(struct hci_conn *conn)
500 BT_DBG("conn %p", conn);
502 if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
503 !(conn->link_mode & HCI_LM_ENCRYPT))
504 return 0;
506 return 1;
508 EXPORT_SYMBOL(hci_conn_check_link_mode);
510 /* Authenticate remote device */
511 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
513 BT_DBG("conn %p", conn);
515 if (conn->pending_sec_level > sec_level)
516 sec_level = conn->pending_sec_level;
518 if (sec_level > conn->sec_level)
519 conn->pending_sec_level = sec_level;
520 else if (conn->link_mode & HCI_LM_AUTH)
521 return 1;
523 /* Make sure we preserve an existing MITM requirement*/
524 auth_type |= (conn->auth_type & 0x01);
526 conn->auth_type = auth_type;
528 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
529 struct hci_cp_auth_requested cp;
530 cp.handle = cpu_to_le16(conn->handle);
531 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
532 sizeof(cp), &cp);
535 return 0;
538 /* Enable security */
539 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
541 BT_DBG("conn %p", conn);
543 if (sec_level == BT_SECURITY_SDP)
544 return 1;
546 if (sec_level == BT_SECURITY_LOW &&
547 (!conn->ssp_mode || !conn->hdev->ssp_mode))
548 return 1;
550 if (conn->link_mode & HCI_LM_ENCRYPT)
551 return hci_conn_auth(conn, sec_level, auth_type);
553 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
554 return 0;
556 if (hci_conn_auth(conn, sec_level, auth_type)) {
557 struct hci_cp_set_conn_encrypt cp;
558 cp.handle = cpu_to_le16(conn->handle);
559 cp.encrypt = 1;
560 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
561 sizeof(cp), &cp);
564 return 0;
566 EXPORT_SYMBOL(hci_conn_security);
568 /* Change link key */
569 int hci_conn_change_link_key(struct hci_conn *conn)
571 BT_DBG("conn %p", conn);
573 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
574 struct hci_cp_change_conn_link_key cp;
575 cp.handle = cpu_to_le16(conn->handle);
576 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
577 sizeof(cp), &cp);
580 return 0;
582 EXPORT_SYMBOL(hci_conn_change_link_key);
584 /* Switch role */
585 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
587 BT_DBG("conn %p", conn);
589 if (!role && conn->link_mode & HCI_LM_MASTER)
590 return 1;
592 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
593 struct hci_cp_switch_role cp;
594 bacpy(&cp.bdaddr, &conn->dst);
595 cp.role = role;
596 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
599 return 0;
601 EXPORT_SYMBOL(hci_conn_switch_role);
603 /* Enter active mode */
604 void hci_conn_enter_active_mode(struct hci_conn *conn)
606 struct hci_dev *hdev = conn->hdev;
608 BT_DBG("conn %p mode %d", conn, conn->mode);
610 if (test_bit(HCI_RAW, &hdev->flags))
611 return;
613 if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
614 goto timer;
616 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
617 struct hci_cp_exit_sniff_mode cp;
618 cp.handle = cpu_to_le16(conn->handle);
619 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
622 timer:
623 if (hdev->idle_timeout > 0)
624 mod_timer(&conn->idle_timer,
625 jiffies + msecs_to_jiffies(hdev->idle_timeout));
628 /* Enter sniff mode */
629 void hci_conn_enter_sniff_mode(struct hci_conn *conn)
631 struct hci_dev *hdev = conn->hdev;
633 BT_DBG("conn %p mode %d", conn, conn->mode);
635 if (test_bit(HCI_RAW, &hdev->flags))
636 return;
638 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
639 return;
641 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
642 return;
644 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
645 struct hci_cp_sniff_subrate cp;
646 cp.handle = cpu_to_le16(conn->handle);
647 cp.max_latency = cpu_to_le16(0);
648 cp.min_remote_timeout = cpu_to_le16(0);
649 cp.min_local_timeout = cpu_to_le16(0);
650 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
653 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
654 struct hci_cp_sniff_mode cp;
655 cp.handle = cpu_to_le16(conn->handle);
656 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
657 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
658 cp.attempt = cpu_to_le16(4);
659 cp.timeout = cpu_to_le16(1);
660 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
664 /* Drop all connection on the device */
665 void hci_conn_hash_flush(struct hci_dev *hdev)
667 struct hci_conn_hash *h = &hdev->conn_hash;
668 struct list_head *p;
670 BT_DBG("hdev %s", hdev->name);
672 p = h->list.next;
673 while (p != &h->list) {
674 struct hci_conn *c;
676 c = list_entry(p, struct hci_conn, list);
677 p = p->next;
679 c->state = BT_CLOSED;
681 hci_proto_disconn_cfm(c, 0x16);
682 hci_conn_del(c);
686 /* Check pending connect attempts */
687 void hci_conn_check_pending(struct hci_dev *hdev)
689 struct hci_conn *conn;
691 BT_DBG("hdev %s", hdev->name);
693 hci_dev_lock(hdev);
695 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
696 if (conn)
697 hci_acl_connect(conn);
699 hci_dev_unlock(hdev);
702 void hci_conn_hold_device(struct hci_conn *conn)
704 atomic_inc(&conn->devref);
706 EXPORT_SYMBOL(hci_conn_hold_device);
708 void hci_conn_put_device(struct hci_conn *conn)
710 if (atomic_dec_and_test(&conn->devref))
711 hci_conn_del_sysfs(conn);
713 EXPORT_SYMBOL(hci_conn_put_device);
715 int hci_get_conn_list(void __user *arg)
717 struct hci_conn_list_req req, *cl;
718 struct hci_conn_info *ci;
719 struct hci_dev *hdev;
720 struct list_head *p;
721 int n = 0, size, err;
723 if (copy_from_user(&req, arg, sizeof(req)))
724 return -EFAULT;
726 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
727 return -EINVAL;
729 size = sizeof(req) + req.conn_num * sizeof(*ci);
731 cl = kmalloc(size, GFP_KERNEL);
732 if (!cl)
733 return -ENOMEM;
735 hdev = hci_dev_get(req.dev_id);
736 if (!hdev) {
737 kfree(cl);
738 return -ENODEV;
741 ci = cl->conn_info;
743 hci_dev_lock_bh(hdev);
744 list_for_each(p, &hdev->conn_hash.list) {
745 register struct hci_conn *c;
746 c = list_entry(p, struct hci_conn, list);
748 bacpy(&(ci + n)->bdaddr, &c->dst);
749 (ci + n)->handle = c->handle;
750 (ci + n)->type = c->type;
751 (ci + n)->out = c->out;
752 (ci + n)->state = c->state;
753 (ci + n)->link_mode = c->link_mode;
754 if (++n >= req.conn_num)
755 break;
757 hci_dev_unlock_bh(hdev);
759 cl->dev_id = hdev->id;
760 cl->conn_num = n;
761 size = sizeof(req) + n * sizeof(*ci);
763 hci_dev_put(hdev);
765 err = copy_to_user(arg, cl, size);
766 kfree(cl);
768 return err ? -EFAULT : 0;
771 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
773 struct hci_conn_info_req req;
774 struct hci_conn_info ci;
775 struct hci_conn *conn;
776 char __user *ptr = arg + sizeof(req);
778 if (copy_from_user(&req, arg, sizeof(req)))
779 return -EFAULT;
781 hci_dev_lock_bh(hdev);
782 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
783 if (conn) {
784 bacpy(&ci.bdaddr, &conn->dst);
785 ci.handle = conn->handle;
786 ci.type = conn->type;
787 ci.out = conn->out;
788 ci.state = conn->state;
789 ci.link_mode = conn->link_mode;
791 hci_dev_unlock_bh(hdev);
793 if (!conn)
794 return -ENOENT;
796 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
799 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
801 struct hci_auth_info_req req;
802 struct hci_conn *conn;
804 if (copy_from_user(&req, arg, sizeof(req)))
805 return -EFAULT;
807 hci_dev_lock_bh(hdev);
808 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
809 if (conn)
810 req.type = conn->auth_type;
811 hci_dev_unlock_bh(hdev);
813 if (!conn)
814 return -ENOENT;
816 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;