/*	$NetBSD: hci_link.c,v 1.20 2008/04/24 11:38:37 ad Exp $	*/

/*-
 * Copyright (c) 2005 Iain Hibbert.
 * Copyright (c) 2006 Itronix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Itronix Inc. may not be used to endorse
 *    or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hci_link.c,v 1.20 2008/04/24 11:38:37 ad Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>

#include <netbt/bluetooth.h>
#include <netbt/hci.h>
#include <netbt/l2cap.h>
#include <netbt/sco.h>

/*******************************************************************************
 *
 *	HCI ACL Connections
 */

/*
 * Automatically expire unused ACL connections after this number of
 * seconds (if zero, do not expire unused connections) [sysctl]
 */
int hci_acl_expiry = 10;	/* seconds */

/*
 * hci_acl_open(unit, bdaddr)
 *
 * open ACL connection to remote bdaddr. Only one ACL connection is permitted
 * between any two Bluetooth devices, so we look for an existing one before
 * trying to start a new one.
 */
struct hci_link *
hci_acl_open(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct hci_link *link;
	struct hci_memo *memo;
	hci_create_con_cp cp;
	int err;

	KASSERT(unit != NULL);
	KASSERT(bdaddr != NULL);

	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (link == NULL) {
		link = hci_link_alloc(unit, bdaddr, HCI_LINK_ACL);
		if (link == NULL)
			return NULL;
	}

	switch(link->hl_state) {
	case HCI_LINK_CLOSED:
		/*
		 * open connection to remote device
		 */
		memset(&cp, 0, sizeof(cp));
		bdaddr_copy(&cp.bdaddr, bdaddr);
		cp.pkt_type = htole16(unit->hci_packet_type);

		memo = hci_memo_find(unit, bdaddr);
		if (memo != NULL) {
			cp.page_scan_rep_mode = memo->page_scan_rep_mode;
			cp.page_scan_mode = memo->page_scan_mode;
			cp.clock_offset = memo->clock_offset;
		}

		if (unit->hci_link_policy & HCI_LINK_POLICY_ENABLE_ROLE_SWITCH)
			cp.accept_role_switch = 1;

		err = hci_send_cmd(unit, HCI_CMD_CREATE_CON, &cp, sizeof(cp));
		if (err) {
			hci_link_free(link, err);
			return NULL;
		}

		link->hl_flags |= HCI_LINK_CREATE_CON;
		link->hl_state = HCI_LINK_WAIT_CONNECT;
		break;

	case HCI_LINK_WAIT_CONNECT:
	case HCI_LINK_WAIT_AUTH:
	case HCI_LINK_WAIT_ENCRYPT:
	case HCI_LINK_WAIT_SECURE:
		/*
		 * somebody else already trying to connect, we just
		 * sit on the bench with them..
		 */
		break;

	case HCI_LINK_OPEN:
		/*
		 * If already open, halt any expiry timeouts. We don't need
		 * to care about already invoking timeouts since refcnt > 0
		 * will keep the link alive.
		 */
		callout_stop(&link->hl_expire);
		break;

	default:
		UNKNOWN(link->hl_state);
		return NULL;
	}

	/* open */
	link->hl_refcnt++;

	return link;
}

/*
 * Close ACL connection. When there are no more references to this link,
 * we can either close it down or schedule a delayed closedown.
 */
void
hci_acl_close(struct hci_link *link, int err)
{

	KASSERT(link != NULL);

	if (--link->hl_refcnt == 0) {
		if (link->hl_state == HCI_LINK_CLOSED)
			hci_link_free(link, err);
		else if (hci_acl_expiry > 0)
			callout_schedule(&link->hl_expire, hci_acl_expiry * hz);
	}
}

/*
 * Incoming ACL connection.
 *
 * Check the L2CAP listeners list and only accept when there is a
 * potential listener available.
 *
 * There should not be a link to the same bdaddr already, we check
 * anyway though it's left unhandled for now.
 */
struct hci_link *
hci_acl_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct hci_link *link;
	struct l2cap_channel *chan;

	LIST_FOREACH(chan, &l2cap_listen_list, lc_ncid) {
		if (bdaddr_same(&unit->hci_bdaddr, &chan->lc_laddr.bt_bdaddr)
		    || bdaddr_any(&chan->lc_laddr.bt_bdaddr))
			break;
	}

	if (chan == NULL) {
		DPRINTF("%s: rejecting connection (no listeners)\n",
		    device_xname(unit->hci_dev));

		return NULL;
	}

	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (link != NULL) {
		DPRINTF("%s: rejecting connection (link exists)\n",
		    device_xname(unit->hci_dev));

		return NULL;
	}

	link = hci_link_alloc(unit, bdaddr, HCI_LINK_ACL);
	if (link != NULL) {
		link->hl_state = HCI_LINK_WAIT_CONNECT;

		if (hci_acl_expiry > 0)
			callout_schedule(&link->hl_expire, hci_acl_expiry * hz);
	}

	return link;
}
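
/*
 * ACL link expiry timeout (set up in hci_link_alloc). If the link is
 * still unreferenced when this fires, free it or ask the controller to
 * disconnect it, depending on the link state.
 */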
void
hci_acl_timeout(void *arg)
{
	struct hci_link *link = arg;
	hci_discon_cp cp;
	int err;

	mutex_enter(bt_lock);
	callout_ack(&link->hl_expire);

	if (link->hl_refcnt > 0)
		goto out;

	DPRINTF("link #%d expired\n", link->hl_handle);

	switch (link->hl_state) {
	case HCI_LINK_CLOSED:
	case HCI_LINK_WAIT_CONNECT:
		hci_link_free(link, ECONNRESET);
		break;

	case HCI_LINK_WAIT_AUTH:
	case HCI_LINK_WAIT_ENCRYPT:
	case HCI_LINK_WAIT_SECURE:
	case HCI_LINK_OPEN:
		cp.con_handle = htole16(link->hl_handle);
		cp.reason = 0x13; /* "Remote User Terminated Connection" */

		err = hci_send_cmd(link->hl_unit, HCI_CMD_DISCONNECT,
		    &cp, sizeof(cp));

		if (err) {
			DPRINTF("error %d sending HCI_CMD_DISCONNECT\n",
			    err);
		}

		break;

	default:
		UNKNOWN(link->hl_state);
		break;
	}

out:
	mutex_exit(bt_lock);
}

/*
 * Initiate any Link Mode change requests.
 */
int
hci_acl_setmode(struct hci_link *link)
{
	int err;

	KASSERT(link != NULL);
	KASSERT(link->hl_unit != NULL);

	if (link->hl_state != HCI_LINK_OPEN)
		return EINPROGRESS;

	if ((link->hl_flags & HCI_LINK_AUTH_REQ)
	    && !(link->hl_flags & HCI_LINK_AUTH)) {
		hci_auth_req_cp cp;

		DPRINTF("requesting auth for handle #%d\n",
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_AUTH;
		cp.con_handle = htole16(link->hl_handle);
		err = hci_send_cmd(link->hl_unit, HCI_CMD_AUTH_REQ,
		    &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	if ((link->hl_flags & HCI_LINK_ENCRYPT_REQ)
	    && !(link->hl_flags & HCI_LINK_ENCRYPT)) {
		hci_set_con_encryption_cp cp;

		/* XXX we should check features for encryption capability */

		DPRINTF("requesting encryption for handle #%d\n",
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_ENCRYPT;
		cp.con_handle = htole16(link->hl_handle);
		cp.encryption_enable = 0x01;

		err = hci_send_cmd(link->hl_unit, HCI_CMD_SET_CON_ENCRYPTION,
		    &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	if ((link->hl_flags & HCI_LINK_SECURE_REQ)) {
		hci_change_con_link_key_cp cp;

		/* always change link key for SECURE requests */
		link->hl_flags &= ~HCI_LINK_SECURE;

		DPRINTF("changing link key for handle #%d\n",
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_SECURE;
		cp.con_handle = htole16(link->hl_handle);

		err = hci_send_cmd(link->hl_unit, HCI_CMD_CHANGE_CON_LINK_KEY,
		    &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	return 0;
}

/*
 * Link Mode changed.
 *
 * This is called from event handlers when the mode change
 * is complete. We notify upstream and restart the link.
 */
void
hci_acl_linkmode(struct hci_link *link)
{
	struct l2cap_channel *chan, *next;
	int err, mode = 0;

	DPRINTF("handle #%d, auth %s, encrypt %s, secure %s\n",
	    link->hl_handle,
	    (link->hl_flags & HCI_LINK_AUTH ? "on" : "off"),
	    (link->hl_flags & HCI_LINK_ENCRYPT ? "on" : "off"),
	    (link->hl_flags & HCI_LINK_SECURE ? "on" : "off"));

	if (link->hl_flags & HCI_LINK_AUTH)
		mode |= L2CAP_LM_AUTH;

	if (link->hl_flags & HCI_LINK_ENCRYPT)
		mode |= L2CAP_LM_ENCRYPT;

	if (link->hl_flags & HCI_LINK_SECURE)
		mode |= L2CAP_LM_SECURE;

	/*
	 * The link state will only be OPEN here if the mode change
	 * was successful. So, we can proceed with L2CAP connections,
	 * or notify already established channels, to allow any that
	 * are dissatisfied to disconnect before we restart.
	 */
	next = LIST_FIRST(&l2cap_active_list);
	while ((chan = next) != NULL) {
		next = LIST_NEXT(chan, lc_ncid);

		if (chan->lc_link != link)
			continue;

		switch(chan->lc_state) {
		case L2CAP_WAIT_SEND_CONNECT_REQ:	/* we are connecting */
			if ((mode & chan->lc_mode) != chan->lc_mode) {
				l2cap_close(chan, ECONNABORTED);
				break;
			}

			chan->lc_state = L2CAP_WAIT_RECV_CONNECT_RSP;
			err = l2cap_send_connect_req(chan);
			if (err) {
				l2cap_close(chan, err);
				break;
			}
			break;

		case L2CAP_WAIT_SEND_CONNECT_RSP:	/* they are connecting */
			if ((mode & chan->lc_mode) != chan->lc_mode) {
				l2cap_send_connect_rsp(link, chan->lc_ident,
				    0, chan->lc_rcid,
				    L2CAP_SECURITY_BLOCK);

				l2cap_close(chan, ECONNABORTED);
				break;
			}

			l2cap_send_connect_rsp(link, chan->lc_ident,
			    chan->lc_lcid, chan->lc_rcid,
			    L2CAP_SUCCESS);

			chan->lc_state = L2CAP_WAIT_CONFIG;
			chan->lc_flags |= (L2CAP_WAIT_CONFIG_RSP | L2CAP_WAIT_CONFIG_REQ);
			err = l2cap_send_config_req(chan);
			if (err) {
				l2cap_close(chan, err);
				break;
			}
			break;

		case L2CAP_WAIT_RECV_CONNECT_RSP:
		case L2CAP_WAIT_CONFIG:
		case L2CAP_OPEN: /* already established */
			(*chan->lc_proto->linkmode)(chan->lc_upper, mode);
			break;

		default:
			break;
		}
	}

	link->hl_state = HCI_LINK_OPEN;
	hci_acl_start(link);
}

/*
 * Receive ACL Data
 *
 * we accumulate packet fragments on the hci_link structure
 * until a full L2CAP frame is ready, then send it on.
 */
void
hci_acl_recv(struct mbuf *m, struct hci_unit *unit)
{
	struct hci_link *link;
	hci_acldata_hdr_t hdr;
	uint16_t handle, want;
	int pb, got;

	KASSERT(m != NULL);
	KASSERT(unit != NULL);

	KASSERT(m->m_pkthdr.len >= sizeof(hdr));
	m_copydata(m, 0, sizeof(hdr), &hdr);
	m_adj(m, sizeof(hdr));

#ifdef DIAGNOSTIC
	if (hdr.type != HCI_ACL_DATA_PKT) {
		aprint_error_dev(unit->hci_dev, "bad ACL packet type\n");
		goto bad;
	}

	if (m->m_pkthdr.len != le16toh(hdr.length)) {
		aprint_error_dev(unit->hci_dev,
		    "bad ACL packet length (%d != %d)\n",
		    m->m_pkthdr.len, le16toh(hdr.length));
		goto bad;
	}
#endif

	hdr.length = le16toh(hdr.length);
	hdr.con_handle = le16toh(hdr.con_handle);
	handle = HCI_CON_HANDLE(hdr.con_handle);
	pb = HCI_PB_FLAG(hdr.con_handle);

	link = hci_link_lookup_handle(unit, handle);
	if (link == NULL) {
		hci_discon_cp cp;

		DPRINTF("%s: dumping packet for unknown handle #%d\n",
		    device_xname(unit->hci_dev), handle);

		/*
		 * There is no way to find out what this connection handle is
		 * for, just get rid of it. This may happen, if a USB dongle
		 * is plugged into a self powered hub and does not reset when
		 * the system is shut down.
		 */
		cp.con_handle = htole16(handle);
		cp.reason = 0x13; /* "Remote User Terminated Connection" */
		hci_send_cmd(unit, HCI_CMD_DISCONNECT, &cp, sizeof(cp));
		goto bad;
	}

	switch (pb) {
	case HCI_PACKET_START:
		if (link->hl_rxp != NULL)
			aprint_error_dev(unit->hci_dev,
			    "dropped incomplete ACL packet\n");

		if (m->m_pkthdr.len < sizeof(l2cap_hdr_t)) {
			aprint_error_dev(unit->hci_dev, "short ACL packet\n");
			goto bad;
		}

		link->hl_rxp = m;
		got = m->m_pkthdr.len;
		break;

	case HCI_PACKET_FRAGMENT:
		if (link->hl_rxp == NULL) {
			aprint_error_dev(unit->hci_dev,
			    "unexpected packet fragment\n");

			goto bad;
		}

		got = m->m_pkthdr.len + link->hl_rxp->m_pkthdr.len;
		m_cat(link->hl_rxp, m);
		m = link->hl_rxp;
		m->m_pkthdr.len = got;
		break;

	default:
		aprint_error_dev(unit->hci_dev, "unknown packet type\n");
		goto bad;
	}

	m_copydata(m, 0, sizeof(want), &want);
	want = le16toh(want) + sizeof(l2cap_hdr_t) - got;

	if (want > 0)
		return;

	link->hl_rxp = NULL;

	if (want == 0) {
		l2cap_recv_frame(m, link);
		return;
	}

bad:
	m_freem(m);
}

/*
 * Send ACL data on link
 *
 * We must fragment packets into chunks of less than unit->hci_max_acl_size and
 * prepend a relevant ACL header to each fragment. We keep a PDU structure
 * attached to the link, so that completed fragments can be marked off and
 * more data requested from above once the PDU is sent.
 */
int
hci_acl_send(struct mbuf *m, struct hci_link *link,
		struct l2cap_channel *chan)
{
	struct l2cap_pdu *pdu;
	struct mbuf *n = NULL;
	int plen, mlen, num = 0;

	KASSERT(link != NULL);
	KASSERT(m != NULL);
	KASSERT(m->m_flags & M_PKTHDR);
	KASSERT(m->m_pkthdr.len > 0);

	if (link->hl_state == HCI_LINK_CLOSED) {
		m_freem(m);
		return ENETDOWN;
	}

	pdu = pool_get(&l2cap_pdu_pool, PR_NOWAIT);
	if (pdu == NULL)
		goto nomem;

	pdu->lp_chan = chan;
	pdu->lp_pending = 0;
	MBUFQ_INIT(&pdu->lp_data);

	plen = m->m_pkthdr.len;
	mlen = link->hl_unit->hci_max_acl_size;

	DPRINTFN(5, "%s: handle #%d, plen = %d, max = %d\n",
	    device_xname(link->hl_unit->hci_dev), link->hl_handle, plen, mlen);

	while (plen > 0) {
		if (plen > mlen) {
			n = m_split(m, mlen, M_DONTWAIT);
			if (n == NULL)
				goto nomem;
		} else {
			mlen = plen;
		}

		if (num++ == 0)
			m->m_flags |= M_PROTO1;	/* tag first fragment */

		DPRINTFN(10, "chunk of %d (plen = %d) bytes\n", mlen, plen);
		MBUFQ_ENQUEUE(&pdu->lp_data, m);
		m = n;
		plen -= mlen;
	}

	TAILQ_INSERT_TAIL(&link->hl_txq, pdu, lp_next);
	link->hl_txqlen += num;

	hci_acl_start(link);

	return 0;

nomem:
	if (m) m_freem(m);
	if (pdu) {
		MBUFQ_DRAIN(&pdu->lp_data);
		pool_put(&l2cap_pdu_pool, pdu);
	}

	return ENOMEM;
}

/*
 * Start sending ACL data on link.
 *
 * This is called when the queue may need restarting: as new data
 * is queued, after link mode changes have completed, or when device
 * buffers have cleared.
 *
 * We may use all the available packet slots. The reason that we add
 * the ACL encapsulation here rather than in hci_acl_send() is that L2CAP
 * signal packets may be queued before the handle is given to us..
 */
void
hci_acl_start(struct hci_link *link)
{
	struct hci_unit *unit;
	hci_acldata_hdr_t *hdr;
	struct l2cap_pdu *pdu;
	struct mbuf *m;
	uint16_t handle;

	KASSERT(link != NULL);

	unit = link->hl_unit;
	KASSERT(unit != NULL);

	/* this is mainly to block ourselves (below) */
	if (link->hl_state != HCI_LINK_OPEN)
		return;

	if (link->hl_txqlen == 0 || unit->hci_num_acl_pkts == 0)
		return;

	/* find first PDU with data to send */
	pdu = TAILQ_FIRST(&link->hl_txq);
	for (;;) {
		if (pdu == NULL)
			return;

		if (MBUFQ_FIRST(&pdu->lp_data) != NULL)
			break;

		pdu = TAILQ_NEXT(pdu, lp_next);
	}

	while (unit->hci_num_acl_pkts > 0) {
		MBUFQ_DEQUEUE(&pdu->lp_data, m);
		KASSERT(m != NULL);

		if (m->m_flags & M_PROTO1)
			handle = HCI_MK_CON_HANDLE(link->hl_handle,
			    HCI_PACKET_START, 0);
		else
			handle = HCI_MK_CON_HANDLE(link->hl_handle,
			    HCI_PACKET_FRAGMENT, 0);

		M_PREPEND(m, sizeof(*hdr), M_DONTWAIT);
		if (m == NULL)
			break;

		hdr = mtod(m, hci_acldata_hdr_t *);
		hdr->type = HCI_ACL_DATA_PKT;
		hdr->con_handle = htole16(handle);
		hdr->length = htole16(m->m_pkthdr.len - sizeof(*hdr));

		link->hl_txqlen--;
		pdu->lp_pending++;

		hci_output_acl(unit, m);

		if (MBUFQ_FIRST(&pdu->lp_data) == NULL) {
			if (pdu->lp_chan) {
				/*
				 * This should enable streaming of PDUs - when
				 * we have placed all the fragments on the acl
				 * output queue, we trigger the L2CAP layer to
				 * send us down one more. Use a false state so
				 * we don't run into ourselves coming back from
				 * the future..
				 */
				link->hl_state = HCI_LINK_BLOCK;
				l2cap_start(pdu->lp_chan);
				link->hl_state = HCI_LINK_OPEN;
			}

			pdu = TAILQ_NEXT(pdu, lp_next);
			if (pdu == NULL)
				break;
		}
	}

	/*
	 * We had our turn now, move to the back of the queue to let
	 * other links have a go at the output buffers..
	 */
	if (TAILQ_NEXT(link, hl_next)) {
		TAILQ_REMOVE(&unit->hci_links, link, hl_next);
		TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
	}
}

/*
 * Confirm ACL packets cleared from Controller buffers. We scan our PDU
 * list to clear pending fragments and signal upstream for more data
 * when a PDU is complete.
 */
void
hci_acl_complete(struct hci_link *link, int num)
{
	struct l2cap_pdu *pdu;
	struct l2cap_channel *chan;

	DPRINTFN(5, "handle #%d (%d)\n", link->hl_handle, num);

	while (num > 0) {
		pdu = TAILQ_FIRST(&link->hl_txq);
		if (pdu == NULL) {
			aprint_error_dev(link->hl_unit->hci_dev,
			    "%d packets completed on handle #%x but none pending!\n",
			    num, link->hl_handle);

			return;
		}

		if (num >= pdu->lp_pending) {
			num -= pdu->lp_pending;
			pdu->lp_pending = 0;

			if (MBUFQ_FIRST(&pdu->lp_data) == NULL) {
				TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
				chan = pdu->lp_chan;
				if (chan != NULL) {
					chan->lc_pending--;
					(*chan->lc_proto->complete)
					    (chan->lc_upper, 1);

					if (chan->lc_pending == 0)
						l2cap_start(chan);
				}

				pool_put(&l2cap_pdu_pool, pdu);
			}
		} else {
			pdu->lp_pending -= num;
			num = 0;
		}
	}
}

/*******************************************************************************
 *
 *	HCI SCO Connections
 */

/*
 * Incoming SCO Connection. We check the list for anybody willing
 * to take it.
 */
struct hci_link *
hci_sco_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct sockaddr_bt laddr, raddr;
	struct sco_pcb *pcb, *new;
	struct hci_link *sco, *acl;

	memset(&laddr, 0, sizeof(laddr));
	laddr.bt_len = sizeof(laddr);
	laddr.bt_family = AF_BLUETOOTH;
	bdaddr_copy(&laddr.bt_bdaddr, &unit->hci_bdaddr);

	memset(&raddr, 0, sizeof(raddr));
	raddr.bt_len = sizeof(raddr);
	raddr.bt_family = AF_BLUETOOTH;
	bdaddr_copy(&raddr.bt_bdaddr, bdaddr);

	/*
	 * There should already be an ACL link up and running before
	 * the controller sends us SCO connection requests, but you
	 * never know..
	 */
	acl = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (acl == NULL || acl->hl_state != HCI_LINK_OPEN)
		return NULL;

	LIST_FOREACH(pcb, &sco_pcb, sp_next) {
		if ((pcb->sp_flags & SP_LISTENING) == 0)
			continue;

		new = (*pcb->sp_proto->newconn)(pcb->sp_upper, &laddr, &raddr);
		if (new == NULL)
			continue;

		/*
		 * Ok, got new pcb so we can start a new link and fill
		 * in all the details.
		 */
		bdaddr_copy(&new->sp_laddr, &unit->hci_bdaddr);
		bdaddr_copy(&new->sp_raddr, bdaddr);

		sco = hci_link_alloc(unit, bdaddr, HCI_LINK_SCO);
		if (sco == NULL) {
			sco_detach(&new);
			return NULL;
		}

		sco->hl_link = hci_acl_open(unit, bdaddr);
		KASSERT(sco->hl_link == acl);

		sco->hl_sco = new;
		new->sp_link = sco;

		new->sp_mtu = unit->hci_max_sco_size;
		return sco;
	}

	return NULL;
}

/*
 * receive SCO packet, we only need to strip the header and send
 * it to the right handler
 */
void
hci_sco_recv(struct mbuf *m, struct hci_unit *unit)
{
	struct hci_link *link;
	hci_scodata_hdr_t hdr;
	uint16_t handle;

	KASSERT(m != NULL);
	KASSERT(unit != NULL);

	KASSERT(m->m_pkthdr.len >= sizeof(hdr));
	m_copydata(m, 0, sizeof(hdr), &hdr);
	m_adj(m, sizeof(hdr));

#ifdef DIAGNOSTIC
	if (hdr.type != HCI_SCO_DATA_PKT) {
		aprint_error_dev(unit->hci_dev, "bad SCO packet type\n");
		goto bad;
	}

	if (m->m_pkthdr.len != hdr.length) {
		aprint_error_dev(unit->hci_dev,
		    "bad SCO packet length (%d != %d)\n",
		    m->m_pkthdr.len, hdr.length);

		goto bad;
	}
#endif

	hdr.con_handle = le16toh(hdr.con_handle);
	handle = HCI_CON_HANDLE(hdr.con_handle);

	link = hci_link_lookup_handle(unit, handle);
	if (link == NULL || link->hl_type == HCI_LINK_ACL) {
		DPRINTF("%s: dumping packet for unknown handle #%d\n",
		    device_xname(unit->hci_dev), handle);

		goto bad;
	}

	(*link->hl_sco->sp_proto->input)(link->hl_sco->sp_upper, m);
	return;

bad:
	m_freem(m);
}
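
/*
 * Start sending SCO data on link (currently a no-op).
 */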
void
hci_sco_start(struct hci_link *link)
{
}

/*
 * SCO packets have completed at the controller, so we can
 * signal up to free the buffer space.
 */
void
hci_sco_complete(struct hci_link *link, int num)
{

	DPRINTFN(5, "handle #%d (num=%d)\n", link->hl_handle, num);
	link->hl_sco->sp_pending--;
	(*link->hl_sco->sp_proto->complete)(link->hl_sco->sp_upper, num);
}

/*******************************************************************************
 *
 *	Generic HCI Connection alloc/free/lookup etc
 */
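
/*
 * Allocate and initialise a new link structure (ACL or SCO) and
 * attach it to the unit's list of links.
 */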
struct hci_link *
hci_link_alloc(struct hci_unit *unit, bdaddr_t *bdaddr, uint8_t type)
{
	struct hci_link *link;

	KASSERT(unit != NULL);

	link = malloc(sizeof(struct hci_link), M_BLUETOOTH, M_NOWAIT | M_ZERO);
	if (link == NULL)
		return NULL;

	link->hl_unit = unit;
	link->hl_type = type;
	link->hl_state = HCI_LINK_CLOSED;
	bdaddr_copy(&link->hl_bdaddr, bdaddr);

	/* init ACL portion */
	callout_init(&link->hl_expire, 0);
	callout_setfunc(&link->hl_expire, hci_acl_timeout, link);

	TAILQ_INIT(&link->hl_txq);	/* outgoing packets */
	TAILQ_INIT(&link->hl_reqs);	/* request queue */

	link->hl_mtu = L2CAP_MTU_DEFAULT;		/* L2CAP signal mtu */
	link->hl_flush = L2CAP_FLUSH_TIMO_DEFAULT;	/* flush timeout */

	/* init SCO portion */
	MBUFQ_INIT(&link->hl_data);

	/* attach to unit */
	TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
	return link;
}
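
/*
 * Release a link structure: detach any upper-layer users (L2CAP
 * channels, SCO pcb), flush queued data, return outstanding packet
 * counts to the unit and finally free the memory.
 */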
void
hci_link_free(struct hci_link *link, int err)
{
	struct l2cap_req *req;
	struct l2cap_pdu *pdu;
	struct l2cap_channel *chan, *next;

	KASSERT(link != NULL);

	DPRINTF("#%d, type = %d, state = %d, refcnt = %d\n",
	    link->hl_handle, link->hl_type,
	    link->hl_state, link->hl_refcnt);

	/* ACL reference count */
	if (link->hl_refcnt > 0) {
		next = LIST_FIRST(&l2cap_active_list);
		while ((chan = next) != NULL) {
			next = LIST_NEXT(chan, lc_ncid);
			if (chan->lc_link == link)
				l2cap_close(chan, err);
		}
	}
	KASSERT(link->hl_refcnt == 0);

	/* ACL L2CAP requests.. */
	while ((req = TAILQ_FIRST(&link->hl_reqs)) != NULL)
		l2cap_request_free(req);

	KASSERT(TAILQ_EMPTY(&link->hl_reqs));

	/* ACL outgoing data queue */
	while ((pdu = TAILQ_FIRST(&link->hl_txq)) != NULL) {
		TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
		MBUFQ_DRAIN(&pdu->lp_data);
		if (pdu->lp_pending)
			link->hl_unit->hci_num_acl_pkts += pdu->lp_pending;

		pool_put(&l2cap_pdu_pool, pdu);
	}

	KASSERT(TAILQ_EMPTY(&link->hl_txq));

	/* ACL incoming data packet */
	if (link->hl_rxp != NULL) {
		m_freem(link->hl_rxp);
		link->hl_rxp = NULL;
	}

	/* SCO master ACL link */
	if (link->hl_link != NULL) {
		hci_acl_close(link->hl_link, err);
		link->hl_link = NULL;
	}

	/* SCO pcb */
	if (link->hl_sco != NULL) {
		struct sco_pcb *pcb;

		pcb = link->hl_sco;
		pcb->sp_link = NULL;
		link->hl_sco = NULL;
		(*pcb->sp_proto->disconnected)(pcb->sp_upper, err);
	}

	/* flush any SCO data */
	MBUFQ_DRAIN(&link->hl_data);

	/*
	 * Halt the callout - if it's already running we cannot free the
	 * link structure but the timeout function will call us back in
	 * any case.
	 */
	link->hl_state = HCI_LINK_CLOSED;
	callout_stop(&link->hl_expire);
	if (callout_invoking(&link->hl_expire))
		return;

	callout_destroy(&link->hl_expire);

	/*
	 * If we made a note of clock offset, keep it in a memo
	 * to facilitate reconnections to this device
	 */
	if (link->hl_clock != 0) {
		struct hci_memo *memo;

		memo = hci_memo_new(link->hl_unit, &link->hl_bdaddr);
		if (memo != NULL)
			memo->clock_offset = link->hl_clock;
	}

	TAILQ_REMOVE(&link->hl_unit->hci_links, link, hl_next);
	free(link, M_BLUETOOTH);
}

/*
 * Lookup HCI link by address and type. Note that for SCO links there may
 * be more than one link per address, so we only return links with no
 * handle (ie new links)
 */
struct hci_link *
hci_link_lookup_bdaddr(struct hci_unit *unit, bdaddr_t *bdaddr, uint8_t type)
{
	struct hci_link *link;

	KASSERT(unit != NULL);
	KASSERT(bdaddr != NULL);

	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
		if (link->hl_type != type)
			continue;

		if (type == HCI_LINK_SCO && link->hl_handle != 0)
			continue;

		if (bdaddr_same(&link->hl_bdaddr, bdaddr))
			break;
	}

	return link;
}
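
/*
 * Lookup HCI link by connection handle.
 */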
struct hci_link *
hci_link_lookup_handle(struct hci_unit *unit, uint16_t handle)
{
	struct hci_link *link;

	KASSERT(unit != NULL);

	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
		if (handle == link->hl_handle)
			break;
	}

	return link;
}