tomato.git (RT-AC66 3.0.0.4.374.130): release/src-rt-6.x/linux/linux-2.6/net/bluetooth/hci_conn.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

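/* Initiate an outgoing ACL connection. Page scan parameters are taken
 * from the inquiry cache when a recent entry for the destination exists. */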
void hci_acl_connect(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
        struct hci_cp_create_conn cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->link_mode = HCI_LM_MASTER;

        conn->attempt++;

        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;

        if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst)) &&
                        inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
                cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                cp.pscan_mode = ie->data.pscan_mode;
                cp.clock_offset = ie->data.clock_offset | cpu_to_le16(0x8000);
                memcpy(conn->dev_class, ie->data.dev_class, 3);
        }

        cp.pkt_type = cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK);
        if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
                cp.role_switch = 0x01;
        else
                cp.role_switch = 0x00;

        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, sizeof(cp), &cp);
}

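/* Cancel an outgoing ACL connection that has not completed yet.
 * Create Connection Cancel needs a Bluetooth 1.2 (hci_ver >= 2) controller. */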
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
        struct hci_cp_create_conn_cancel cp;

        BT_DBG("%p", conn);

        if (conn->hdev->hci_ver < 2)
                return;

        bacpy(&cp.bdaddr, &conn->dst);
        hci_send_cmd(conn->hdev, OGF_LINK_CTL,
                        OCF_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

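/* Disconnect an established link with the given HCI reason code */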
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
        struct hci_cp_disconnect cp;

        BT_DBG("%p", conn);

        conn->state = BT_DISCONN;

        cp.handle = cpu_to_le16(conn->handle);
        cp.reason = reason;
        hci_send_cmd(conn->hdev, OGF_LINK_CTL,
                        OCF_DISCONNECT, sizeof(cp), &cp);
}

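/* Set up a SCO link on top of the ACL connection identified by handle */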
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        cp.pkt_type = cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
        cp.handle = cpu_to_le16(handle);

        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ADD_SCO, sizeof(cp), &cp);
}

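/* Disconnect timer. If the connection is no longer referenced, cancel a
 * pending connection attempt or disconnect an established link. */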
static void hci_conn_timeout(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p state %d", conn, conn->state);

        if (atomic_read(&conn->refcnt))
                return;

        hci_dev_lock(hdev);

        switch (conn->state) {
        case BT_CONNECT:
                hci_acl_connect_cancel(conn);
                break;
        case BT_CONNECTED:
                hci_acl_disconn(conn, 0x13);
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }

        hci_dev_unlock(hdev);
}

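/* Idle timer. The connection saw no activity for a while, so try to
 * put it into sniff mode. */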
static void hci_conn_idle(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        hci_conn_enter_sniff_mode(conn);
}

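/* Allocate a new connection object and register it with the device */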
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
        struct hci_conn *conn;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
        if (!conn)
                return NULL;

        bacpy(&conn->dst, dst);
        conn->hdev  = hdev;
        conn->type  = type;
        conn->mode  = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;

        conn->power_save = 1;

        skb_queue_head_init(&conn->data_q);

        init_timer(&conn->disc_timer);
        conn->disc_timer.function = hci_conn_timeout;
        conn->disc_timer.data = (unsigned long) conn;

        init_timer(&conn->idle_timer);
        conn->idle_timer.function = hci_conn_idle;
        conn->idle_timer.data = (unsigned long) conn;

        atomic_set(&conn->refcnt, 0);

        hci_dev_hold(hdev);

        tasklet_disable(&hdev->tx_task);

        hci_conn_hash_add(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

        hci_conn_add_sysfs(conn);

        tasklet_enable(&hdev->tx_task);

        return conn;
}

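/* Unlink and release a connection. Any ACL credits still held for
 * unacked frames are returned to the device. */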
int hci_conn_del(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

        del_timer(&conn->idle_timer);

        del_timer(&conn->disc_timer);

        if (conn->type == SCO_LINK) {
                struct hci_conn *acl = conn->link;
                if (acl) {
                        acl->link = NULL;
                        hci_conn_put(acl);
                }
        } else {
                struct hci_conn *sco = conn->link;
                if (sco)
                        sco->link = NULL;

                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        }

        tasklet_disable(&hdev->tx_task);

        hci_conn_del_sysfs(conn);

        hci_conn_hash_del(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

        tasklet_enable(&hdev->tx_task);

        skb_queue_purge(&conn->data_q);

        hci_dev_put(hdev);

        /* will free via device release */
        put_device(&conn->dev);

        return 0;
}

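/* Select a local HCI device for the given source/destination pair */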
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
        int use_src = bacmp(src, BDADDR_ANY);
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%s -> %s", batostr(src), batostr(dst));

        read_lock_bh(&hci_dev_list_lock);

        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);

                if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
                        continue;

                /* Simple routing:
                 *   No source address - find interface with bdaddr != dst
                 *   Source address    - find interface with bdaddr == src
                 */
                if (use_src) {
                        if (!bacmp(&d->bdaddr, src)) {
                                hdev = d; break;
                        }
                } else {
                        if (bacmp(&d->bdaddr, dst)) {
                                hdev = d; break;
                        }
                }
        }

        if (hdev)
                hdev = hci_dev_hold(hdev);

        read_unlock_bh(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
        struct hci_conn *acl;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
                if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
                        return NULL;
        }

        hci_conn_hold(acl);

        if (acl->state == BT_OPEN || acl->state == BT_CLOSED)
                hci_acl_connect(acl);

        if (type == SCO_LINK) {
                struct hci_conn *sco;

                if (!(sco = hci_conn_hash_lookup_ba(hdev, SCO_LINK, dst))) {
                        if (!(sco = hci_conn_add(hdev, SCO_LINK, dst))) {
                                hci_conn_put(acl);
                                return NULL;
                        }
                }

                acl->link = sco;
                sco->link = acl;

                hci_conn_hold(sco);

                if (acl->state == BT_CONNECTED &&
                                (sco->state == BT_OPEN || sco->state == BT_CLOSED))
                        hci_add_sco(sco, acl->handle);

                return sco;
        } else {
                return acl;
        }
}
EXPORT_SYMBOL(hci_connect);

/* Authenticate remote device */
int hci_conn_auth(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (conn->link_mode & HCI_LM_AUTH)
                return 1;

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_auth_requested cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_AUTH_REQUESTED, sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_auth);

/* Enable encryption */
int hci_conn_encrypt(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (conn->link_mode & HCI_LM_ENCRYPT)
                return 1;

        if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;

        if (hci_conn_auth(conn)) {
                struct hci_cp_set_conn_encrypt cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.encrypt = 1;
                hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_encrypt);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_change_conn_link_key cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
{
        BT_DBG("conn %p", conn);

        if (!role && conn->link_mode & HCI_LM_MASTER)
                return 1;

        if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
                struct hci_cp_switch_role cp;
                bacpy(&cp.bdaddr, &conn->dst);
                cp.role = role;
                hci_send_cmd(conn->hdev, OGF_LINK_POLICY, OCF_SWITCH_ROLE, sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
                goto timer;

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_exit_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, OGF_LINK_POLICY,
                                OCF_EXIT_SNIFF_MODE, sizeof(cp), &cp);
        }

timer:
        if (hdev->idle_timeout > 0)
                mod_timer(&conn->idle_timer,
                        jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;

        if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
                return;

        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_latency = cpu_to_le16(0);
                cp.min_remote_timeout = cpu_to_le16(0);
                cp.min_local_timeout = cpu_to_le16(0);
                hci_send_cmd(hdev, OGF_LINK_POLICY,
                                OCF_SNIFF_SUBRATE, sizeof(cp), &cp);
        }

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
                cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
                cp.attempt = cpu_to_le16(4);
                cp.timeout = cpu_to_le16(1);
                hci_send_cmd(hdev, OGF_LINK_POLICY,
                                OCF_SNIFF_MODE, sizeof(cp), &cp);
        }
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;

        BT_DBG("hdev %s", hdev->name);

        p = h->list.next;
        while (p != &h->list) {
                struct hci_conn *c;

                c = list_entry(p, struct hci_conn, list);
                p = p->next;

                c->state = BT_CLOSED;

                hci_proto_disconn_ind(c, 0x16);
                hci_conn_del(c);
        }
}

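/* Copy the list of active connections to userspace (HCIGETCONNLIST ioctl) */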
int hci_get_conn_list(void __user *arg)
{
        struct hci_conn_list_req req, *cl;
        struct hci_conn_info *ci;
        struct hci_dev *hdev;
        struct list_head *p;
        int n = 0, size, err;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
                return -EINVAL;

        size = sizeof(req) + req.conn_num * sizeof(*ci);

        if (!(cl = kmalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        if (!(hdev = hci_dev_get(req.dev_id))) {
                kfree(cl);
                return -ENODEV;
        }

        ci = cl->conn_info;

        hci_dev_lock_bh(hdev);
        list_for_each(p, &hdev->conn_hash.list) {
                register struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                bacpy(&(ci + n)->bdaddr, &c->dst);
                (ci + n)->handle = c->handle;
                (ci + n)->type = c->type;
                (ci + n)->out = c->out;
                (ci + n)->state = c->state;
                (ci + n)->link_mode = c->link_mode;
                if (++n >= req.conn_num)
                        break;
        }
        hci_dev_unlock_bh(hdev);

        cl->dev_id = hdev->id;
        cl->conn_num = n;
        size = sizeof(req) + n * sizeof(*ci);

        hci_dev_put(hdev);

        err = copy_to_user(arg, cl, size);
        kfree(cl);

        return err ? -EFAULT : 0;
}

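/* Copy info about a single connection, looked up by type and bdaddr,
 * to userspace (HCIGETCONNINFO ioctl) */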
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_conn_info_req req;
        struct hci_conn_info ci;
        struct hci_conn *conn;
        char __user *ptr = arg + sizeof(req);

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock_bh(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
        if (conn) {
                bacpy(&ci.bdaddr, &conn->dst);
                ci.handle = conn->handle;
                ci.type = conn->type;
                ci.out = conn->out;
                ci.state = conn->state;
                ci.link_mode = conn->link_mode;
        }
        hci_dev_unlock_bh(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}