[linux-2.6/mini2440.git] / net / bluetooth / hci_conn.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
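
/* Initiate an outgoing ACL connection to a remote device */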
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
						cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
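
/* Cancel an outgoing ACL connection attempt */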
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
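
/* Disconnect an established link with the given HCI reason code */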
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
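
/* Add a SCO link on top of an existing ACL connection */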
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
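
/* Set up a synchronous (SCO/eSCO) connection on top of an existing ACL link */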
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
	cp.max_latency = cpu_to_le16(0xffff);
	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}
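
/* Disconnect timer: tear down the link once all references have been dropped */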
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->type == ACL_LINK)
			hci_acl_connect_cancel(conn);
		else
			hci_acl_disconn(conn, 0x13);
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		hci_acl_disconn(conn, 0x13);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}
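
/* Idle timer: put an inactive connection into sniff mode */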
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}
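
/* Allocate a new connection object and register it with the device */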
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;

	conn->power_save = 1;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = hdev->esco_type & SCO_ESCO_MASK;
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	tasklet_enable(&hdev->tx_task);

	return conn;
}
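
/* Unlink a connection from the device and release its resources */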
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	return 0;
}
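
/* Pick the local HCI device to use for a connection to dst */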
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
		if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
		if (!(sco = hci_conn_add(hdev, type, dst))) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		if (lmp_esco_capable(hdev))
			hci_setup_sync(sco, acl->handle);
		else
			hci_add_sco(sco, acl->handle);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
			!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
int hci_conn_auth(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) {
		if (!(conn->auth_type & 0x01)) {
			conn->auth_type |= 0x01;
			conn->link_mode &= ~HCI_LM_AUTH;
		}
	}

	if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_auth);

/* Enable encryption */
int hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn);

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_encrypt);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = cpu_to_le16(4);
		cp.timeout = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_conn_del_sysfs(c);

		hci_proto_disconn_ind(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}
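
/* Copy the list of active connections to user space */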
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	if (!(cl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	if (!(hdev = hci_dev_get(req.dev_id))) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
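
/* Copy information about a single connection to user space */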
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
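
/* Report the authentication type of an ACL connection to user space */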
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}