/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */
#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif
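
/* Issue an HCI Create Connection request for an outgoing ACL link.
 * Page scan parameters and the clock offset are taken from the inquiry
 * cache when a recent entry for the destination address is available,
 * which speeds up paging; otherwise conservative defaults are used. */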
static void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;
	conn->link_mode = HCI_LM_MASTER;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst)) &&
			inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
		cp.pscan_rep_mode = ie->data.pscan_rep_mode;
		cp.pscan_mode     = ie->data.pscan_mode;
		cp.clock_offset   = ie->data.clock_offset | __cpu_to_le16(0x8000);
		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = __cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, sizeof(cp), &cp);
}
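
/* Cancel an outgoing ACL connection attempt. The Create Connection
 * Cancel command only exists on Bluetooth 1.2 (HCI version 2) and
 * later controllers, so older devices are simply left alone. */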
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, OGF_LINK_CTL,
				OCF_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = __cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, OGF_LINK_CTL,
				OCF_DISCONNECT, sizeof(cp), &cp);
}
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	cp.pkt_type = __cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
	cp.handle   = __cpu_to_le16(handle);

	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ADD_SCO, sizeof(cp), &cp);
}
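
/* Disconnect timer callback, normally armed when the last reference to
 * a connection is dropped (see hci_conn_put() in hci_core.h). If the
 * connection picked up a new user in the meantime (refcnt != 0) it is
 * left alone; otherwise a pending connect is cancelled, or an
 * established link is disconnected with reason 0x13 (remote user
 * terminated connection). */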
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
		hci_acl_connect_cancel(conn);
		break;
	case BT_CONNECTED:
		hci_acl_disconn(conn, 0x13);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}
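
/* Allocate and register a new connection object for the given device
 * and destination address. The connection starts out in BT_OPEN with
 * an active link mode, gets its disconnect and idle timers set up, and
 * is added to the device's connection hash and to sysfs. */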
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;

	conn->power_save = 1;

	skb_queue_head_init(&conn->data_q);

	init_timer(&conn->disc_timer);
	conn->disc_timer.function = hci_conn_timeout;
	conn->disc_timer.data = (unsigned long) conn;

	init_timer(&conn->idle_timer);
	conn->idle_timer.function = hci_conn_idle;
	conn->idle_timer.data = (unsigned long) conn;

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_add_sysfs(conn);

	tasklet_enable(&hdev->tx_task);

	return conn;
}
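
/* Tear down a connection object: detach it from its SCO/ACL peer,
 * return any unacknowledged ACL credits to the device, remove it from
 * the connection hash and sysfs, and drop the final device reference
 * so the sysfs release callback can free the memory. */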
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == SCO_LINK) {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	} else {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_del_sysfs(conn);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	hci_dev_put(hdev);

	/* will free via device release */
	put_device(&conn->dev);

	return 0;
}
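
/* Pick the local adapter to use for a connection to @dst. The caller
 * owns the returned reference and must release it with hci_dev_put(). */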
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *acl;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
		if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED)
		hci_acl_connect(acl);

	if (type == SCO_LINK) {
		struct hci_conn *sco;

		if (!(sco = hci_conn_hash_lookup_ba(hdev, SCO_LINK, dst))) {
			if (!(sco = hci_conn_add(hdev, SCO_LINK, dst))) {
				hci_conn_put(acl);
				return NULL;
			}
		}

		acl->link = sco;
		sco->link = acl;

		hci_conn_hold(sco);

		if (acl->state == BT_CONNECTED &&
				(sco->state == BT_OPEN || sco->state == BT_CLOSED))
			hci_add_sco(sco, acl->handle);

		return sco;
	} else {
		return acl;
	}
}
EXPORT_SYMBOL(hci_connect);
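
/*
 * Rough usage sketch (illustrative only, not copied from an in-tree
 * caller): a protocol such as L2CAP resolves a route and then asks for
 * an ACL link while holding the device lock:
 *
 *	hdev = hci_get_route(dst, src);
 *	if (!hdev)
 *		return -EHOSTUNREACH;
 *	hci_dev_lock_bh(hdev);
 *	conn = hci_connect(hdev, ACL_LINK, dst);
 *	hci_dev_unlock_bh(hdev);
 *	hci_dev_put(hdev);
 *
 * The connection reference taken by hci_connect() is dropped later
 * with hci_conn_put(), which arms the disconnect timer.
 */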
/* Authenticate remote device */
int hci_conn_auth(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_AUTH_REQUESTED, sizeof(cp), &cp);
	}
	return 0;
}
EXPORT_SYMBOL(hci_conn_auth);
/* Enable encryption */
int hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = __cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp);
	}
	return 0;
}
EXPORT_SYMBOL(hci_conn_encrypt);
/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp);
	}
	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);
/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, OGF_LINK_POLICY, OCF_SWITCH_ROLE, sizeof(cp), &cp);
	}
	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);
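
/* Bring a connection back to active mode before data is sent: leave
 * sniff mode if power saving put us there, and (re)arm the idle timer
 * that drops back into sniff mode after hdev->idle_timeout ms. */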
/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, OGF_LINK_POLICY,
				OCF_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}
/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = __cpu_to_le16(conn->handle);
		cp.max_latency        = __constant_cpu_to_le16(0);
		cp.min_remote_timeout = __constant_cpu_to_le16(0);
		cp.min_local_timeout  = __constant_cpu_to_le16(0);
		hci_send_cmd(hdev, OGF_LINK_POLICY,
				OCF_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = __cpu_to_le16(conn->handle);
		cp.max_interval = __cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = __cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = __constant_cpu_to_le16(4);
		cp.timeout      = __constant_cpu_to_le16(1);
		hci_send_cmd(hdev, OGF_LINK_POLICY,
				OCF_SNIFF_MODE, sizeof(cp), &cp);
	}
}
/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_proto_disconn_ind(c, 0x16);
		hci_conn_del(c);
	}
}
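
/* Copy the list of active connections to userspace; this backs the
 * HCIGETCONNLIST ioctl used by tools such as hcitool. */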
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	if (!(cl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	if (!(hdev = hci_dev_get(req.dev_id))) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
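
/* Report the state of a single connection to userspace; this backs the
 * HCIGETCONNINFO ioctl. */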
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}