/* net/bluetooth/hci_conn.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef BT_DBG
#define BT_DBG(D...)
#endif

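/* Start an outgoing ACL connection: issue HCI Create Connection towards
 * conn->dst. Page scan repetition mode, page scan mode and clock offset
 * are seeded from the inquiry cache when a sufficiently recent entry is
 * available (bit 15 of the clock offset marks it as valid); otherwise
 * the defaults set up below are used. */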
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
							cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

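/* Abort a connection attempt that has not completed yet. Create
 * Connection Cancel only exists from Bluetooth 1.2 (HCI version 2)
 * onwards, so on older controllers the attempt is simply left to
 * time out. */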
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

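/* Request disconnection of an established link. The reason is the HCI
 * reason code passed in the Disconnect command; callers in this file
 * use 0x13 ("remote user terminated connection"). */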
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

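/* Set up a legacy SCO link on top of an existing ACL connection,
 * identified by the ACL connection handle. */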
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

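/* Set up a synchronous (eSCO) link on top of an existing ACL
 * connection. The fixed parameters request 8000 bytes/s in each
 * direction (64 kbit/s voice), leave maximum latency and
 * retransmission effort as "don't care" (0xffff / 0xff) and take the
 * voice setting from the adapter. */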
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.max_latency    = cpu_to_le16(0xffff);
	cp.voice_setting  = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

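/* Disconnect timer callback (conn->disc_timer). If the connection is
 * still referenced it does nothing; otherwise pending outgoing attempts
 * are cancelled, established links are disconnected with reason 0x13,
 * and anything else is simply marked closed. */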
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->type == ACL_LINK)
			hci_acl_connect_cancel(conn);
		else
			hci_acl_disconn(conn, 0x13);
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		hci_acl_disconn(conn, 0x13);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}

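/* Idle timer callback (conn->idle_timer): try to put the link into
 * sniff mode once it has been idle for hdev->idle_timeout. */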
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

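/* Allocate and register a new connection object for the given device,
 * link type and destination address. The packet type mask is derived
 * from the adapter's capabilities, both timers are initialised, and the
 * connection is added to the device's connection hash with the TX
 * tasklet held off. GFP_ATOMIC is used because callers may be in
 * non-sleeping context. */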
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;

	conn->power_save = 1;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = hdev->esco_type & SCO_ESCO_MASK;
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	tasklet_enable(&hdev->tx_task);

	return conn;
}

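/* Unlink and clean up a connection: stop its timers, detach any paired
 * ACL/SCO link, return unacknowledged ACL credits to the device and
 * remove the connection from the hash while the TX tasklet is held
 * off. */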
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	return 0;
}

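/* Pick the local adapter to use for a connection to dst. With a
 * specific source address the adapter whose bdaddr matches src is
 * chosen; otherwise the first adapter that is up, not in raw mode and
 * not itself the destination is used. A reference on the returned
 * device is taken for the caller. */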
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
		if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED)
		hci_acl_connect(acl);

	if (type == ACL_LINK)
		return acl;

	if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
		if (!(sco = hci_conn_add(hdev, type, dst))) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		if (lmp_esco_capable(hdev))
			hci_setup_sync(sco, acl->handle);
		else
			hci_add_sco(sco, acl->handle);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Authenticate remote device */
int hci_conn_auth(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) {
		if (!(conn->auth_type & 0x01)) {
			conn->auth_type = HCI_AT_GENERAL_BONDING_MITM;
			conn->link_mode &= ~HCI_LM_AUTH;
		}
	}

	if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_auth);

/* Enable encryption */
int hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn);

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_encrypt);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_conn_del_sysfs(c);

		hci_proto_disconn_ind(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

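/* ioctl helper: copy up to req.conn_num connection records for the
 * requested device back to user space (HCIGETCONNLIST). conn_num is
 * capped so the temporary buffer stays within roughly two pages. */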
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	if (!(cl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	if (!(hdev = hci_dev_get(req.dev_id))) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

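/* ioctl helper: report handle, type, direction, state and link mode for
 * the connection matching the bdaddr and type in the request
 * (HCIGETCONNINFO). The result is written just past the request header
 * in the user buffer. */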
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

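/* ioctl helper: return the authentication requirements recorded for the
 * ACL connection to the given bdaddr (HCIGETAUTHINFO). */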
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}