Bluetooth: Add enhanced security model for Simple Pairing
include/net/bluetooth/hci_core.h

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>

/* HCI upper protocols */
#define HCI_PROTO_L2CAP 0
#define HCI_PROTO_SCO   1

/* HCI Core structures */
struct inquiry_data {
        bdaddr_t        bdaddr;
        __u8            pscan_rep_mode;
        __u8            pscan_period_mode;
        __u8            pscan_mode;
        __u8            dev_class[3];
        __le16          clock_offset;
        __s8            rssi;
        __u8            ssp_mode;
};

struct inquiry_entry {
        struct inquiry_entry    *next;
        __u32                   timestamp;
        struct inquiry_data     data;
};

struct inquiry_cache {
        spinlock_t              lock;
        __u32                   timestamp;
        struct inquiry_entry    *list;
};

struct hci_conn_hash {
        struct list_head list;
        spinlock_t       lock;
        unsigned int     acl_num;
        unsigned int     sco_num;
};

struct hci_dev {
        struct list_head list;
        spinlock_t       lock;
        atomic_t         refcnt;

        char             name[8];
        unsigned long    flags;
        __u16            id;
        __u8             type;
        bdaddr_t         bdaddr;
        __u8             dev_name[248];
        __u8             dev_class[3];
        __u8             features[8];
        __u8             commands[64];
        __u8             ssp_mode;
        __u8             hci_ver;
        __u16            hci_rev;
        __u16            manufacturer;
        __u16            voice_setting;

        __u16            pkt_type;
        __u16            esco_type;
        __u16            link_policy;
        __u16            link_mode;

        __u32            idle_timeout;
        __u16            sniff_min_interval;
        __u16            sniff_max_interval;

        unsigned long    quirks;

        atomic_t         cmd_cnt;
        unsigned int     acl_cnt;
        unsigned int     sco_cnt;

        unsigned int     acl_mtu;
        unsigned int     sco_mtu;
        unsigned int     acl_pkts;
        unsigned int     sco_pkts;

        unsigned long    cmd_last_tx;
        unsigned long    acl_last_tx;
        unsigned long    sco_last_tx;

        struct tasklet_struct cmd_task;
        struct tasklet_struct rx_task;
        struct tasklet_struct tx_task;

        struct sk_buff_head rx_q;
        struct sk_buff_head raw_q;
        struct sk_buff_head cmd_q;

        struct sk_buff      *sent_cmd;
        struct sk_buff      *reassembly[3];

        struct semaphore    req_lock;
        wait_queue_head_t   req_wait_q;
        __u32               req_status;
        __u32               req_result;

        struct inquiry_cache inq_cache;
        struct hci_conn_hash conn_hash;

        struct hci_dev_stats stat;

        struct sk_buff_head  driver_init;

        void                 *driver_data;
        void                 *core_data;

        atomic_t             promisc;

        struct device        *parent;
        struct device        dev;

        struct module        *owner;

        int (*open)(struct hci_dev *hdev);
        int (*close)(struct hci_dev *hdev);
        int (*flush)(struct hci_dev *hdev);
        int (*send)(struct sk_buff *skb);
        void (*destruct)(struct hci_dev *hdev);
        void (*notify)(struct hci_dev *hdev, unsigned int evt);
        int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};

struct hci_conn {
        struct list_head list;

        atomic_t         refcnt;
        spinlock_t       lock;

        bdaddr_t         dst;
        __u16            handle;
        __u16            state;
        __u8             mode;
        __u8             type;
        __u8             out;
        __u8             attempt;
        __u8             dev_class[3];
        __u8             features[8];
        __u8             ssp_mode;
        __u16            interval;
        __u16            pkt_type;
        __u16            link_policy;
        __u32            link_mode;
        __u8             auth_type;
        __u8             sec_level;
        __u8             power_save;
        unsigned long    pend;

        unsigned int     sent;

        struct sk_buff_head data_q;

        struct timer_list disc_timer;
        struct timer_list idle_timer;

        struct work_struct work;

        struct device    dev;

        struct hci_dev   *hdev;
        void             *l2cap_data;
        void             *sco_data;
        void             *priv;

        struct hci_conn  *link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   /* 60 seconds */

#define inquiry_cache_lock(c)           spin_lock(&c->lock)
#define inquiry_cache_unlock(c)         spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)        spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)      spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
        struct inquiry_cache *c = &hdev->inq_cache;
        spin_lock_init(&c->lock);
        c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
        struct inquiry_cache *c = &hdev->inq_cache;
        return (c->list == NULL);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
        struct inquiry_cache *c = &hdev->inq_cache;
        return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
        return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);

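/*
 * Illustrative usage sketch (not part of the original header): checking
 * whether a remote device showed up in a recent inquiry.  The function name
 * is hypothetical; hci_dev_lock()/hci_dev_unlock() are defined further down
 * in this file.
 *
 *      static int example_recently_seen(struct hci_dev *hdev, bdaddr_t *bdaddr)
 *      {
 *              struct inquiry_entry *e;
 *              int hit;
 *
 *              hci_dev_lock(hdev);
 *              e = hci_inquiry_cache_lookup(hdev, bdaddr);
 *              hit = (e != NULL) && (inquiry_entry_age(e) < INQUIRY_ENTRY_AGE_MAX);
 *              hci_dev_unlock(hdev);
 *
 *              return hit;
 *      }
 */
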
/* ----- HCI Connections ----- */
enum {
        HCI_CONN_AUTH_PEND,
        HCI_CONN_ENCRYPT_PEND,
        HCI_CONN_RSWITCH_PEND,
        HCI_CONN_MODE_CHANGE_PEND,
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        INIT_LIST_HEAD(&h->list);
        spin_lock_init(&h->lock);
        h->acl_num = 0;
        h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        list_add(&c->list, &h->list);
        if (c->type == ACL_LINK)
                h->acl_num++;
        else
                h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        list_del(&c->list);
        if (c->type == ACL_LINK)
                h->acl_num--;
        else
                h->sco_num--;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
                                        __u16 handle)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->handle == handle)
                        return c;
        }
        return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
                                        __u8 type, bdaddr_t *ba)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == type && !bacmp(&c->dst, ba))
                        return c;
        }
        return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
                                        __u8 type, __u16 state)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == type && c->state == state)
                        return c;
        }
        return NULL;
}

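/*
 * Illustrative usage sketch (not part of the original header): event
 * handling code usually resolves the connection behind a handle with the
 * lookup above, under the device lock.  The function name is hypothetical.
 *
 *      static int example_link_encrypted(struct hci_dev *hdev, __u16 handle)
 *      {
 *              struct hci_conn *conn;
 *              int encrypted = 0;
 *
 *              hci_dev_lock(hdev);
 *              conn = hci_conn_hash_lookup_handle(hdev, handle);
 *              if (conn)
 *                      encrypted = (conn->link_mode & HCI_LM_ENCRYPT) != 0;
 *              hci_dev_unlock(hdev);
 *
 *              return encrypted;
 *      }
 */
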
void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

static inline void hci_conn_hold(struct hci_conn *conn)
{
        atomic_inc(&conn->refcnt);
        del_timer(&conn->disc_timer);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
        if (atomic_dec_and_test(&conn->refcnt)) {
                unsigned long timeo;
                if (conn->type == ACL_LINK) {
                        del_timer(&conn->idle_timer);
                        if (conn->state == BT_CONNECTED) {
                                timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
                                if (!conn->out)
                                        timeo *= 5;
                        } else
                                timeo = msecs_to_jiffies(10);
                } else
                        timeo = msecs_to_jiffies(10);
                mod_timer(&conn->disc_timer, jiffies + timeo);
        }
}

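/*
 * Illustrative usage sketch (not part of the original header): the expected
 * reference discipline for the two helpers above.  Holding a connection
 * cancels its disconnect timer; the final put re-arms it, so an unused ACL
 * link is torn down roughly HCI_DISCONN_TIMEOUT ms later.
 *
 *      hci_conn_hold(conn);
 *      hci_send_acl(conn, skb, flags);
 *      hci_conn_put(conn);
 */
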
/* ----- HCI tasks ----- */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
        tasklet_schedule(&hdev->cmd_task);
}

static inline void hci_sched_rx(struct hci_dev *hdev)
{
        tasklet_schedule(&hdev->rx_task);
}

static inline void hci_sched_tx(struct hci_dev *hdev)
{
        tasklet_schedule(&hdev->tx_task);
}

/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
        if (atomic_dec_and_test(&d->refcnt))
                d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
        __hci_dev_put(d);
        module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
        atomic_inc(&d->refcnt);
        return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
        if (try_module_get(d->owner))
                return __hci_dev_hold(d);
        return NULL;
}

#define hci_dev_lock(d)         spin_lock(&d->lock)
#define hci_dev_unlock(d)       spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)      spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)    spin_unlock_bh(&d->lock)

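/*
 * Illustrative usage sketch (not part of the original header): code running
 * in process context that walks per-device state (the connection hash, the
 * inquiry cache) takes the _bh variant so the RX/TX tasklets cannot race
 * with it.
 *
 *      hci_dev_lock_bh(hdev);
 *      conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &bdaddr);
 *      hci_dev_unlock_bh(hdev);
 */
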
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

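/*
 * Illustrative usage sketch (not part of the original header): hci_dev_get()
 * returns a held reference that must be released with hci_dev_put().
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *      if (hdev) {
 *              printk(KERN_INFO "first HCI device is %s\n", hdev->name);
 *              hci_dev_put(hdev);
 *      }
 */
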
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

/* Receive frame from HCI drivers */
static inline int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                        && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        hci_sched_rx(hdev);
        return 0;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);

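/*
 * Illustrative driver-side sketch (not part of the original header): a
 * transport driver hands a complete packet to the core via hci_recv_frame(),
 * or raw bytes via hci_recv_fragment().  The function name is hypothetical;
 * bt_cb(skb)->pkt_type must already be set.
 *
 *      static int example_driver_rx_event(struct hci_dev *hdev, struct sk_buff *skb)
 *      {
 *              skb->dev = (void *) hdev;
 *              bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *              return hci_recv_frame(skb);
 *      }
 */
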
int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

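/*
 * Illustrative driver-side sketch (not part of the original header):
 * minimal registration of a new HCI device.  The example_* callbacks and
 * the parent_dev pointer are hypothetical.
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->type     = HCI_USB;
 *      hdev->open     = example_open;
 *      hdev->close    = example_close;
 *      hdev->send     = example_send;
 *      hdev->destruct = example_destruct;
 *      hdev->owner    = THIS_MODULE;
 *
 *      SET_HCIDEV_DEV(hdev, parent_dev);
 *
 *      if (hci_register_dev(hdev) < 0) {
 *              hci_free_dev(hdev);
 *              return -EBUSY;
 *      }
 */
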
/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)

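/*
 * Illustrative usage sketch (not part of the original header): these macros
 * test bits of the LMP features page of either a device or a connection;
 * the core only relies on Secure Simple Pairing when both the controller
 * and the remote side advertise and enable it, roughly:
 *
 *      if (lmp_ssp_capable(hdev) && hdev->ssp_mode > 0 && conn->ssp_mode > 0)
 *              ... Simple Pairing security levels apply ...
 */
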
/* ----- HCI protocols ----- */
struct hci_proto {
        char            *name;
        unsigned int    id;
        unsigned long   flags;

        void            *priv;

        int (*connect_ind)  (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
        int (*connect_cfm)  (struct hci_conn *conn, __u8 status);
        int (*disconn_ind)  (struct hci_conn *conn, __u8 reason);
        int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
        int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
        int (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
};

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
        register struct hci_proto *hp;
        int mask = 0;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->connect_ind)
                mask |= hp->connect_ind(hdev, bdaddr, type);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->connect_ind)
                mask |= hp->connect_ind(hdev, bdaddr, type);

        return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
        register struct hci_proto *hp;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->connect_cfm)
                hp->connect_cfm(conn, status);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->connect_cfm)
                hp->connect_cfm(conn, status);
}

static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
{
        register struct hci_proto *hp;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->disconn_ind)
                hp->disconn_ind(conn, reason);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->disconn_ind)
                hp->disconn_ind(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
        register struct hci_proto *hp;
        __u8 encrypt;

        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return;

        encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->security_cfm)
                hp->security_cfm(conn, status, encrypt);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->security_cfm)
                hp->security_cfm(conn, status, encrypt);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
        register struct hci_proto *hp;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->security_cfm)
                hp->security_cfm(conn, status, encrypt);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->security_cfm)
                hp->security_cfm(conn, status, encrypt);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);

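/*
 * Illustrative usage sketch (not part of the original header): an upper
 * protocol plugs into one of the HCI_PROTO_* slots.  The example_* handlers
 * are hypothetical; L2CAP and SCO register themselves this way.
 *
 *      static struct hci_proto example_proto = {
 *              .name          = "EXAMPLE",
 *              .id            = HCI_PROTO_L2CAP,
 *              .connect_ind   = example_connect_ind,
 *              .connect_cfm   = example_connect_cfm,
 *              .disconn_ind   = example_disconn_ind,
 *              .recv_acldata  = example_recv_acldata,
 *              .security_cfm  = example_security_cfm,
 *      };
 *
 *      err = hci_register_proto(&example_proto);
 */
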
/* ----- HCI callbacks ----- */
struct hci_cb {
        struct list_head list;

        char *name;

        void (*security_cfm)    (struct hci_conn *conn, __u8 status, __u8 encrypt);
        void (*key_change_cfm)  (struct hci_conn *conn, __u8 status);
        void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
        struct list_head *p;
        __u8 encrypt;

        hci_proto_auth_cfm(conn, status);

        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return;

        encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->security_cfm)
                        cb->security_cfm(conn, status, encrypt);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
        struct list_head *p;

        hci_proto_encrypt_cfm(conn, status, encrypt);

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->security_cfm)
                        cb->security_cfm(conn, status, encrypt);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
        struct list_head *p;

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->key_change_cfm)
                        cb->key_change_cfm(conn, status);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
        struct list_head *p;

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->role_switch_cfm)
                        cb->role_switch_cfm(conn, status, role);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

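/*
 * Illustrative usage sketch (not part of the original header): a module that
 * only needs the security/role notifications above registers a struct hci_cb
 * instead of a full protocol (RFCOMM does something similar).  The handler
 * name is hypothetical.
 *
 *      static struct hci_cb example_cb = {
 *              .name         = "EXAMPLE",
 *              .security_cfm = example_security_cfm,
 *      };
 *
 *      hci_register_cb(&example_cb);
 *      ...
 *      hci_unregister_cb(&example_cb);
 */
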
int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

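/*
 * Illustrative usage sketch (not part of the original header): queueing an
 * HCI command.  The opcode constant and parameter struct come from
 * <net/bluetooth/hci.h>.
 *
 *      struct hci_cp_remote_name_req cp;
 *
 *      memset(&cp, 0, sizeof(cp));
 *      bacpy(&cp.bdaddr, &conn->dst);
 *      cp.pscan_rep_mode = 0x02;
 *      hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
 */
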
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
        struct bt_sock    bt;
        struct hci_dev    *hdev;
        struct hci_filter filter;
        __u32             cmsg_mask;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
        __u32 type_mask;
        __u32 event_mask[2];
        __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)   down(&d->req_lock)
#define hci_req_unlock(d) up(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);

#endif /* __HCI_CORE_H */