/* $OpenBSD: src/sys/netbt/hci_link.c,v 1.7 2008/02/24 21:34:48 uwe Exp $ */
/* $NetBSD: hci_link.c,v 1.16 2007/11/10 23:12:22 plunky Exp $ */

/*-
 * Copyright (c) 2005 Iain Hibbert.
 * Copyright (c) 2006 Itronix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Itronix Inc. may not be used to endorse
 *    or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/callout.h>
#include <net/if.h>
#include <sys/bus.h>

#include <netbt/bluetooth.h>
#include <netbt/hci.h>
#include <netbt/l2cap.h>
#include <netbt/sco.h>

/*******************************************************************************
 *
 *	HCI ACL Connections
 *
 */

/*
 * Automatically expire unused ACL connections after this number of
 * seconds (if zero, do not expire unused connections) [sysctl]
 */
int hci_acl_expiry = 10;	/* seconds */
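
/*
 * The expiry callout is armed by hci_acl_close() and hci_acl_newconn() when
 * a link has no references, cancelled again by hci_acl_open(), and serviced
 * by hci_acl_timeout() below.
 */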

/*
 * hci_acl_open(unit, bdaddr)
 *
 *	open ACL connection to remote bdaddr. Only one ACL connection is permitted
 *	between any two Bluetooth devices, so we look for an existing one before
 *	trying to start a new one.
 */
struct hci_link *
hci_acl_open(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct hci_link *link;
	struct hci_memo *memo;
	hci_create_con_cp cp;
	int err;

	KKASSERT(unit != NULL);
	KKASSERT(bdaddr != NULL);

	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (link == NULL) {
		link = hci_link_alloc(unit);
		if (link == NULL)
			return NULL;

		link->hl_type = HCI_LINK_ACL;
		bdaddr_copy(&link->hl_bdaddr, bdaddr);
	}

	switch(link->hl_state) {
	case HCI_LINK_CLOSED:
		/*
		 * open connection to remote device
		 */
		memset(&cp, 0, sizeof(cp));
		bdaddr_copy(&cp.bdaddr, bdaddr);
		cp.pkt_type = htole16(unit->hci_packet_type);

		memo = hci_memo_find(unit, bdaddr);
		if (memo != NULL) {
			cp.page_scan_rep_mode = memo->page_scan_rep_mode;
			cp.page_scan_mode = memo->page_scan_mode;
			cp.clock_offset = memo->clock_offset;
		}

		if (unit->hci_link_policy & HCI_LINK_POLICY_ENABLE_ROLE_SWITCH)
			cp.accept_role_switch = 1;

		err = hci_send_cmd(unit, HCI_CMD_CREATE_CON, &cp, sizeof(cp));
		if (err) {
			hci_link_free(link, err);
			return NULL;
		}

		link->hl_state = HCI_LINK_WAIT_CONNECT;
		break;

	case HCI_LINK_WAIT_CONNECT:
	case HCI_LINK_WAIT_AUTH:
	case HCI_LINK_WAIT_ENCRYPT:
	case HCI_LINK_WAIT_SECURE:
		/*
		 * somebody else is already trying to connect; we just
		 * sit on the bench with them..
		 */
		break;

	case HCI_LINK_OPEN:
		/*
		 * If already open, halt any expiry callouts. We don't need
		 * to worry about a callout that is already running, since
		 * refcnt > 0 will keep the link alive.
		 */
		callout_stop(&link->hl_expire);
		break;

	default:
		UNKNOWN(link->hl_state);
		return NULL;
	}

	/* open */
	link->hl_refcnt++;

	return link;
}

/*
 * Close ACL connection. When there are no more references to this link,
 * we can either close it down or schedule a delayed closedown.
 */
void
hci_acl_close(struct hci_link *link, int err)
{
	KKASSERT(link != NULL);

	if (--link->hl_refcnt == 0) {
		if (link->hl_state == HCI_LINK_CLOSED)
			hci_link_free(link, err);
		else if (hci_acl_expiry > 0)
			callout_reset(&link->hl_expire, hci_acl_expiry * hz,
			    hci_acl_timeout, link);
	}
}

/*
 * Incoming ACL connection.
 *
 * For now, we accept all connections but it would be better to check
 * the L2CAP listen list and only accept when there is a listener
 * available.
 *
 * There should not be a link to the same bdaddr already; we check
 * anyway, though it's left unhandled for now.
 */
struct hci_link *
hci_acl_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct hci_link *link;

	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (link != NULL)
		return NULL;

	link = hci_link_alloc(unit);
	if (link != NULL) {
		link->hl_state = HCI_LINK_WAIT_CONNECT;
		link->hl_type = HCI_LINK_ACL;
		bdaddr_copy(&link->hl_bdaddr, bdaddr);

		if (hci_acl_expiry > 0)
			callout_reset(&link->hl_expire, hci_acl_expiry * hz,
			    hci_acl_timeout, link);
	}

	return link;
}

void
hci_acl_timeout(void *arg)
{
	struct hci_link *link = arg;
	hci_discon_cp cp;
	int err;

	crit_enter();

	if (link->hl_refcnt > 0)
		goto out;

	DPRINTF("link #%d expired\n", link->hl_handle);

	switch (link->hl_state) {
	case HCI_LINK_CLOSED:
	case HCI_LINK_WAIT_CONNECT:
		hci_link_free(link, ECONNRESET);
		break;

	case HCI_LINK_WAIT_AUTH:
	case HCI_LINK_WAIT_ENCRYPT:
	case HCI_LINK_WAIT_SECURE:
	case HCI_LINK_OPEN:
		cp.con_handle = htole16(link->hl_handle);
		cp.reason = 0x13;	/* "Remote User Terminated Connection" */

		err = hci_send_cmd(link->hl_unit, HCI_CMD_DISCONNECT,
		    &cp, sizeof(cp));

		if (err) {
			DPRINTF("error %d sending HCI_CMD_DISCONNECT\n",
			    err);
		}

		break;

	default:
		UNKNOWN(link->hl_state);
		break;
	}

out:
	crit_exit();
}

/*
 * Initiate any Link Mode change requests.
 */
int
hci_acl_setmode(struct hci_link *link)
{
	int err;

	KKASSERT(link != NULL);
	KKASSERT(link->hl_unit != NULL);

	if (link->hl_state != HCI_LINK_OPEN)
		return EINPROGRESS;

	if ((link->hl_flags & HCI_LINK_AUTH_REQ)
	    && !(link->hl_flags & HCI_LINK_AUTH)) {
		hci_auth_req_cp cp;

		DPRINTF("(%s) requesting auth for handle #%d\n",
		    device_get_nameunit(link->hl_unit->hci_dev),
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_AUTH;
		cp.con_handle = htole16(link->hl_handle);
		err = hci_send_cmd(link->hl_unit, HCI_CMD_AUTH_REQ,
		    &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	if ((link->hl_flags & HCI_LINK_ENCRYPT_REQ)
	    && !(link->hl_flags & HCI_LINK_ENCRYPT)) {
		hci_set_con_encryption_cp cp;

		/* XXX we should check features for encryption capability */

		DPRINTF("(%s) requesting encryption for handle #%d\n",
		    device_get_nameunit(link->hl_unit->hci_dev),
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_ENCRYPT;
		cp.con_handle = htole16(link->hl_handle);
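		/* 0x01 = enable link-level encryption (0x00 disables it) */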
		cp.encryption_enable = 0x01;

		err = hci_send_cmd(link->hl_unit, HCI_CMD_SET_CON_ENCRYPTION,
		    &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	if ((link->hl_flags & HCI_LINK_SECURE_REQ)) {
		hci_change_con_link_key_cp cp;

		/* always change link key for SECURE requests */
		link->hl_flags &= ~HCI_LINK_SECURE;

		DPRINTF("(%s) changing link key for handle #%d\n",
		    device_get_nameunit(link->hl_unit->hci_dev),
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_SECURE;
		cp.con_handle = htole16(link->hl_handle);

		err = hci_send_cmd(link->hl_unit, HCI_CMD_CHANGE_CON_LINK_KEY,
		    &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	return 0;
}

/*
 * Link Mode changed.
 *
 * This is called from event handlers when the mode change
 * is complete. We notify upstream and restart the link.
 */
void
hci_acl_linkmode(struct hci_link *link)
{
	struct l2cap_channel *chan, *next;
	int err, mode = 0;

	DPRINTF("(%s) handle #%d, auth %s, encrypt %s, secure %s\n",
	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle,
	    (link->hl_flags & HCI_LINK_AUTH ? "on" : "off"),
	    (link->hl_flags & HCI_LINK_ENCRYPT ? "on" : "off"),
	    (link->hl_flags & HCI_LINK_SECURE ? "on" : "off"));

	if (link->hl_flags & HCI_LINK_AUTH)
		mode |= L2CAP_LM_AUTH;

	if (link->hl_flags & HCI_LINK_ENCRYPT)
		mode |= L2CAP_LM_ENCRYPT;

	if (link->hl_flags & HCI_LINK_SECURE)
		mode |= L2CAP_LM_SECURE;
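
	/*
	 * 'mode' now holds what this link actually provides; each channel's
	 * lc_mode, checked below, is what that channel requires.
	 */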

	/*
	 * The link state will only be OPEN here if the mode change
	 * was successful. So, we can proceed with L2CAP connections,
	 * or notify already established channels, to allow any that
	 * are dissatisfied to disconnect before we restart.
	 */
	next = LIST_FIRST(&l2cap_active_list);
	while ((chan = next) != NULL) {
		next = LIST_NEXT(chan, lc_ncid);

		if (chan->lc_link != link)
			continue;

		switch(chan->lc_state) {
		case L2CAP_WAIT_SEND_CONNECT_REQ:	/* we are connecting */
			if ((mode & chan->lc_mode) != chan->lc_mode) {
				l2cap_close(chan, ECONNABORTED);
				break;
			}

			chan->lc_state = L2CAP_WAIT_RECV_CONNECT_RSP;
			err = l2cap_send_connect_req(chan);
			if (err) {
				l2cap_close(chan, err);
				break;
			}
			break;

		case L2CAP_WAIT_SEND_CONNECT_RSP:	/* they are connecting */
			if ((mode & chan->lc_mode) != chan->lc_mode) {
				l2cap_send_connect_rsp(link, chan->lc_ident,
				    0, chan->lc_rcid,
				    L2CAP_SECURITY_BLOCK);

				l2cap_close(chan, ECONNABORTED);
				break;
			}

			l2cap_send_connect_rsp(link, chan->lc_ident,
			    chan->lc_lcid, chan->lc_rcid,
			    L2CAP_SUCCESS);

			chan->lc_state = L2CAP_WAIT_CONFIG;
			chan->lc_flags |= (L2CAP_WAIT_CONFIG_RSP | L2CAP_WAIT_CONFIG_REQ);
			err = l2cap_send_config_req(chan);
			if (err) {
				l2cap_close(chan, err);
				break;
			}
			break;

		case L2CAP_WAIT_RECV_CONNECT_RSP:
		case L2CAP_WAIT_CONFIG:
		case L2CAP_OPEN: /* already established */
			(*chan->lc_proto->linkmode)(chan->lc_upper, mode);
			break;

		default:
			break;
		}
	}

	link->hl_state = HCI_LINK_OPEN;
	hci_acl_start(link);
}

/*
 * Receive ACL Data
 *
 * we accumulate packet fragments on the hci_link structure
 * until a full L2CAP frame is ready, then send it on.
 */
void
hci_acl_recv(struct mbuf *m, struct hci_unit *unit)
{
	struct hci_link *link;
	hci_acldata_hdr_t hdr;
	uint16_t handle, want;
	int pb, got;

	KKASSERT(m != NULL);
	KKASSERT(unit != NULL);

	KKASSERT(m->m_pkthdr.len >= sizeof(hdr));
	m_copydata(m, 0, sizeof(hdr), (caddr_t)&hdr);
	m_adj(m, sizeof(hdr));

#ifdef DIAGNOSTIC
	if (hdr.type != HCI_ACL_DATA_PKT) {
		kprintf("%s: bad ACL packet type\n",
		    device_get_nameunit(unit->hci_dev));
		goto bad;
	}

	if (m->m_pkthdr.len != letoh16(hdr.length)) {
		kprintf("%s: bad ACL packet length (%d != %d)\n",
		    device_get_nameunit(unit->hci_dev), m->m_pkthdr.len,
		    letoh16(hdr.length));
		goto bad;
	}
#endif

	hdr.length = letoh16(hdr.length);
	hdr.con_handle = letoh16(hdr.con_handle);
	handle = HCI_CON_HANDLE(hdr.con_handle);
	pb = HCI_PB_FLAG(hdr.con_handle);

	link = hci_link_lookup_handle(unit, handle);
	if (link == NULL) {
		hci_discon_cp cp;

		DPRINTF("%s: dumping packet for unknown handle #%d\n",
		    device_get_nameunit(unit->hci_dev), handle);

		/*
		 * There is no way to find out what this connection handle is
		 * for, just get rid of it. This may happen if a USB dongle
		 * is plugged into a self-powered hub and does not reset when
		 * the system is shut down.
		 */
		cp.con_handle = htole16(handle);
		cp.reason = 0x13;	/* "Remote User Terminated Connection" */
		hci_send_cmd(unit, HCI_CMD_DISCONNECT, &cp, sizeof(cp));
		goto bad;
	}

	switch (pb) {
	case HCI_PACKET_START:
		if (link->hl_rxp != NULL)
			kprintf("%s: dropped incomplete ACL packet\n",
			    device_get_nameunit(unit->hci_dev));

		if (m->m_pkthdr.len < sizeof(l2cap_hdr_t)) {
			kprintf("%s: short ACL packet\n",
			    device_get_nameunit(unit->hci_dev));

			goto bad;
		}

		link->hl_rxp = m;
		got = m->m_pkthdr.len;
		break;

	case HCI_PACKET_FRAGMENT:
		if (link->hl_rxp == NULL) {
			kprintf("%s: unexpected packet fragment\n",
			    device_get_nameunit(unit->hci_dev));

			goto bad;
		}

		got = m->m_pkthdr.len + link->hl_rxp->m_pkthdr.len;
		m_cat(link->hl_rxp, m);
		m = link->hl_rxp;
		m->m_pkthdr.len = got;
		break;

	default:
		kprintf("%s: unknown packet type\n",
		    device_get_nameunit(unit->hci_dev));

		goto bad;
	}

	m_copydata(m, 0, sizeof(want), (caddr_t)&want);
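	/*
	 * The L2CAP length field counts only the payload, so the complete
	 * frame is (length + sizeof(l2cap_hdr_t)) bytes; 'want' becomes the
	 * number of bytes still outstanding after this fragment.
	 */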
	want = letoh16(want) + sizeof(l2cap_hdr_t) - got;

	if (want > 0)
		return;

	link->hl_rxp = NULL;

	if (want == 0) {
		l2cap_recv_frame(m, link);
		return;
	}

bad:
	m_freem(m);
}

/*
 * Send ACL data on link
 *
 * We must fragment packets into chunks of less than unit->hci_max_acl_size and
 * prepend a relevant ACL header to each fragment. We keep a PDU structure
 * attached to the link, so that completed fragments can be marked off and
 * more data requested from above once the PDU is sent.
 */
int
hci_acl_send(struct mbuf *m, struct hci_link *link,
    struct l2cap_channel *chan)
{
	struct l2cap_pdu *pdu;
	struct mbuf *n = NULL;
	int plen, mlen, num = 0;

	KKASSERT(link != NULL);
	KKASSERT(m != NULL);
	KKASSERT(m->m_flags & M_PKTHDR);
	KKASSERT(m->m_pkthdr.len > 0);

	if (link->hl_state == HCI_LINK_CLOSED) {
		m_freem(m);
		return ENETDOWN;
	}

	pdu = zalloc(l2cap_pdu_pool);
	if (pdu == NULL)
		goto nomem;

	bzero(pdu, sizeof *pdu);
	pdu->lp_chan = chan;
	pdu->lp_pending = 0;

	plen = m->m_pkthdr.len;
	mlen = link->hl_unit->hci_max_acl_size;

	DPRINTFN(5, "%s: handle #%d, plen = %d, max = %d\n",
	    device_get_nameunit(link->hl_unit->hci_dev),
	    link->hl_handle, plen, mlen);

	while (plen > 0) {
		if (plen > mlen) {
			n = m_split(m, mlen, M_NOWAIT);
			if (n == NULL)
				goto nomem;
		} else {
			mlen = plen;
		}
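
		/*
		 * hci_acl_start() turns the M_PROTO1 tag into the packet
		 * boundary flag: the first fragment of a PDU goes out as
		 * HCI_PACKET_START, the rest as HCI_PACKET_FRAGMENT.
		 */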
		if (num++ == 0)
			m->m_flags |= M_PROTO1;	/* tag first fragment */

		DPRINTFN(10, "(%s) chunk of %d (plen = %d) bytes\n",
		    device_get_nameunit(link->hl_unit->hci_dev), mlen, plen);
		IF_ENQUEUE(&pdu->lp_data, m);
		m = n;
		plen -= mlen;
	}

	TAILQ_INSERT_TAIL(&link->hl_txq, pdu, lp_next);
	link->hl_txqlen += num;

	hci_acl_start(link);

	return 0;

nomem:
	if (m) m_freem(m);
	if (pdu) {
		IF_DRAIN(&pdu->lp_data);
		zfree(l2cap_pdu_pool, pdu);
	}

	return ENOMEM;
}

/*
 * Start sending ACL data on link.
 *
 * This is called when the queue may need restarting: as new data
 * is queued, after link mode changes have completed, or when device
 * buffers have cleared.
 *
 * We may use all the available packet slots. The reason that we add
 * the ACL encapsulation here rather than in hci_acl_send() is that L2CAP
 * signal packets may be queued before the handle is given to us..
 */
void
hci_acl_start(struct hci_link *link)
{
	struct hci_unit *unit;
	hci_acldata_hdr_t *hdr;
	struct l2cap_pdu *pdu;
	struct mbuf *m;
	uint16_t handle;

	KKASSERT(link != NULL);

	unit = link->hl_unit;
	KKASSERT(unit != NULL);

	/* this is mainly to block ourselves (below) */
	if (link->hl_state != HCI_LINK_OPEN)
		return;

	if (link->hl_txqlen == 0 || unit->hci_num_acl_pkts == 0)
		return;

	/* find first PDU with data to send */
	pdu = TAILQ_FIRST(&link->hl_txq);
	for (;;) {
		if (pdu == NULL)
			return;

		if (!IF_QEMPTY(&pdu->lp_data))
			break;

		pdu = TAILQ_NEXT(pdu, lp_next);
	}

	while (unit->hci_num_acl_pkts > 0) {
		IF_DEQUEUE(&pdu->lp_data, m);
		KKASSERT(m != NULL);

		if (m->m_flags & M_PROTO1)
			handle = HCI_MK_CON_HANDLE(link->hl_handle,
			    HCI_PACKET_START, 0);
		else
			handle = HCI_MK_CON_HANDLE(link->hl_handle,
			    HCI_PACKET_FRAGMENT, 0);

		M_PREPEND(m, sizeof(*hdr), M_NOWAIT);
		if (m == NULL)
			break;

		hdr = mtod(m, hci_acldata_hdr_t *);
		hdr->type = HCI_ACL_DATA_PKT;
		hdr->con_handle = htole16(handle);
		hdr->length = htole16(m->m_pkthdr.len - sizeof(*hdr));

		link->hl_txqlen--;
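		/*
		 * This fragment stays counted in lp_pending until
		 * hci_acl_complete() retires it; if the link dies first,
		 * hci_link_free() returns the credit to hci_num_acl_pkts.
		 */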
		pdu->lp_pending++;

		hci_output_acl(unit, m);

		if (IF_QEMPTY(&pdu->lp_data)) {
			if (pdu->lp_chan) {
				/*
				 * This should enable streaming of PDUs - when
				 * we have placed all the fragments on the acl
				 * output queue, we trigger the L2CAP layer to
				 * send us down one more. Use a false state so
				 * we don't run into ourselves coming back from
				 * the future..
				 */
				link->hl_state = HCI_LINK_BLOCK;
				l2cap_start(pdu->lp_chan);
				link->hl_state = HCI_LINK_OPEN;
			}

			pdu = TAILQ_NEXT(pdu, lp_next);
			if (pdu == NULL)
				break;
		}
	}

	/*
	 * We had our turn now, move to the back of the queue to let
	 * other links have a go at the output buffers..
	 */
	if (TAILQ_NEXT(link, hl_next)) {
		TAILQ_REMOVE(&unit->hci_links, link, hl_next);
		TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
	}
}

/*
 * Confirm ACL packets cleared from Controller buffers. We scan our PDU
 * list to clear pending fragments and signal upstream for more data
 * when a PDU is complete.
 */
void
hci_acl_complete(struct hci_link *link, int num)
{
	struct l2cap_pdu *pdu;
	struct l2cap_channel *chan;

	DPRINTFN(5, "(%s) handle #%d (%d)\n",
	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle, num);

	while (num > 0) {
		pdu = TAILQ_FIRST(&link->hl_txq);
		if (pdu == NULL) {
			kprintf("%s: %d packets completed on handle #%x "
			    "but none pending!\n",
			    device_get_nameunit(link->hl_unit->hci_dev),
			    num, link->hl_handle);
			return;
		}

		if (num >= pdu->lp_pending) {
			num -= pdu->lp_pending;
			pdu->lp_pending = 0;

			if (IF_QEMPTY(&pdu->lp_data)) {
				TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
				chan = pdu->lp_chan;
				if (chan != NULL) {
					chan->lc_pending--;
					(*chan->lc_proto->complete)
					    (chan->lc_upper, 1);

					if (chan->lc_pending == 0)
						l2cap_start(chan);
				}

				zfree(l2cap_pdu_pool, pdu);
			}
		} else {
			pdu->lp_pending -= num;
			num = 0;
		}
	}
}

/*******************************************************************************
 *
 *	HCI SCO Connections
 *
 */

/*
 * Incoming SCO Connection. We check the list for anybody willing
 * to take it.
 */
struct hci_link *
hci_sco_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct sockaddr_bt laddr, raddr;
	struct sco_pcb *pcb, *new;
	struct hci_link *sco, *acl;

	memset(&laddr, 0, sizeof(laddr));
	laddr.bt_len = sizeof(laddr);
	laddr.bt_family = AF_BLUETOOTH;
	bdaddr_copy(&laddr.bt_bdaddr, &unit->hci_bdaddr);

	memset(&raddr, 0, sizeof(raddr));
	raddr.bt_len = sizeof(raddr);
	raddr.bt_family = AF_BLUETOOTH;
	bdaddr_copy(&raddr.bt_bdaddr, bdaddr);

	/*
	 * There should already be an ACL link up and running before
	 * the controller sends us SCO connection requests, but you
	 * never know..
	 */
	acl = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (acl == NULL || acl->hl_state != HCI_LINK_OPEN)
		return NULL;

	LIST_FOREACH(pcb, &sco_pcb, sp_next) {
		if ((pcb->sp_flags & SP_LISTENING) == 0)
			continue;

		new = (*pcb->sp_proto->newconn)(pcb->sp_upper, &laddr, &raddr);
		if (new == NULL)
			continue;

		/*
		 * Ok, got new pcb so we can start a new link and fill
		 * in all the details.
		 */
		bdaddr_copy(&new->sp_laddr, &unit->hci_bdaddr);
		bdaddr_copy(&new->sp_raddr, bdaddr);

		sco = hci_link_alloc(unit);
		if (sco == NULL) {
			sco_detach(&new);
			return NULL;
		}

		sco->hl_type = HCI_LINK_SCO;
		bdaddr_copy(&sco->hl_bdaddr, bdaddr);

		sco->hl_link = hci_acl_open(unit, bdaddr);
		KKASSERT(sco->hl_link == acl);
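
		/*
		 * hci_acl_open() just found the existing ACL link and took
		 * another reference, keeping it up for the lifetime of the
		 * SCO connection.
		 */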

		sco->hl_sco = new;
		new->sp_link = sco;

		new->sp_mtu = unit->hci_max_sco_size;
		return sco;
	}

	return NULL;
}

/*
 * receive SCO packet, we only need to strip the header and send
 * it to the right handler
 */
void
hci_sco_recv(struct mbuf *m, struct hci_unit *unit)
{
	struct hci_link *link;
	hci_scodata_hdr_t hdr;
	uint16_t handle;

	KKASSERT(m != NULL);
	KKASSERT(unit != NULL);

	KKASSERT(m->m_pkthdr.len >= sizeof(hdr));
	m_copydata(m, 0, sizeof(hdr), (caddr_t)&hdr);
	m_adj(m, sizeof(hdr));

#ifdef DIAGNOSTIC
	if (hdr.type != HCI_SCO_DATA_PKT) {
		kprintf("%s: bad SCO packet type\n",
		    device_get_nameunit(unit->hci_dev));
		goto bad;
	}

	if (m->m_pkthdr.len != hdr.length) {
		kprintf("%s: bad SCO packet length (%d != %d)\n",
		    device_get_nameunit(unit->hci_dev), m->m_pkthdr.len,
		    hdr.length);
		goto bad;
	}
#endif

	hdr.con_handle = letoh16(hdr.con_handle);
	handle = HCI_CON_HANDLE(hdr.con_handle);

	link = hci_link_lookup_handle(unit, handle);
	if (link == NULL || link->hl_type == HCI_LINK_ACL) {
		DPRINTF("%s: dumping packet for unknown handle #%d\n",
		    device_get_nameunit(unit->hci_dev), handle);

		goto bad;
	}

	(*link->hl_sco->sp_proto->input)(link->hl_sco->sp_upper, m);
	return;

bad:
	m_freem(m);
}

void
hci_sco_start(struct hci_link *link)
{
}

/*
 * SCO packets have completed at the controller, so we can
 * signal up to free the buffer space.
 */
void
hci_sco_complete(struct hci_link *link, int num)
{

	DPRINTFN(5, "handle #%d (num=%d)\n", link->hl_handle, num);
	link->hl_sco->sp_pending--;
	(*link->hl_sco->sp_proto->complete)(link->hl_sco->sp_upper, num);
}

/*******************************************************************************
 *
 *	Generic HCI Connection alloc/free/lookup etc
 *
 */
struct hci_link *
hci_link_alloc(struct hci_unit *unit)
{
	struct hci_link *link;

	KKASSERT(unit != NULL);

	link = kmalloc(sizeof *link, M_BLUETOOTH, M_NOWAIT | M_ZERO);
	if (link == NULL)
		return NULL;

	link->hl_unit = unit;
	link->hl_state = HCI_LINK_CLOSED;

	/* init ACL portion */
	callout_init(&link->hl_expire);

	crit_enter();
	TAILQ_INIT(&link->hl_txq);	/* outgoing packets */
	TAILQ_INIT(&link->hl_reqs);	/* request queue */

	link->hl_mtu = L2CAP_MTU_DEFAULT;		/* L2CAP signal mtu */
	link->hl_flush = L2CAP_FLUSH_TIMO_DEFAULT;	/* flush timeout */

	/* init SCO portion */
	/* &link->hl_data is already zero-initialized. */

	/* attach to unit */
	TAILQ_INSERT_HEAD(&unit->hci_links, link, hl_next);
	crit_exit();
	return link;
}

void
hci_link_free(struct hci_link *link, int err)
{
	struct l2cap_req *req;
	struct l2cap_pdu *pdu;
	struct l2cap_channel *chan, *next;

	KKASSERT(link != NULL);

	DPRINTF("(%s) #%d, type = %d, state = %d, refcnt = %d\n",
	    device_get_nameunit(link->hl_unit->hci_dev), link->hl_handle,
	    link->hl_type, link->hl_state, link->hl_refcnt);

	/* ACL reference count */
	if (link->hl_refcnt > 0) {
		next = LIST_FIRST(&l2cap_active_list);
		while ((chan = next) != NULL) {
			next = LIST_NEXT(chan, lc_ncid);
			if (chan->lc_link == link)
				l2cap_close(chan, err);
		}
	}
	KKASSERT(link->hl_refcnt == 0);

	/* ACL L2CAP requests.. */
	while ((req = TAILQ_FIRST(&link->hl_reqs)) != NULL)
		l2cap_request_free(req);

	KKASSERT(TAILQ_EMPTY(&link->hl_reqs));

	/* ACL outgoing data queue */
	while ((pdu = TAILQ_FIRST(&link->hl_txq)) != NULL) {
		TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
		IF_DRAIN(&pdu->lp_data);
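		/*
		 * Fragments that were handed to the controller but never
		 * reported complete would otherwise leak buffer credits,
		 * so give them back to the unit.
		 */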
		if (pdu->lp_pending)
			link->hl_unit->hci_num_acl_pkts += pdu->lp_pending;

		zfree(l2cap_pdu_pool, pdu);
	}

	KKASSERT(TAILQ_EMPTY(&link->hl_txq));

	/* ACL incoming data packet */
	if (link->hl_rxp != NULL) {
		m_freem(link->hl_rxp);
		link->hl_rxp = NULL;
	}

	/* SCO master ACL link */
	if (link->hl_link != NULL) {
		hci_acl_close(link->hl_link, err);
		link->hl_link = NULL;
	}

	/* SCO pcb */
	if (link->hl_sco != NULL) {
		struct sco_pcb *pcb;

		pcb = link->hl_sco;
		pcb->sp_link = NULL;
		link->hl_sco = NULL;
		(*pcb->sp_proto->disconnected)(pcb->sp_upper, err);
	}

	/* flush any SCO data */
	crit_enter();
	IF_DRAIN(&link->hl_data);
	crit_exit();

	/*
	 * Halt the timeout - if it's already running we cannot free the
	 * link structure but the timeout function will call us back in
	 * any case.
	 */
	link->hl_state = HCI_LINK_CLOSED;
	callout_stop(&link->hl_expire);
	if (callout_active(&link->hl_expire))
		return;

	/*
	 * If we made a note of clock offset, keep it in a memo
	 * to facilitate reconnections to this device
	 */
	if (link->hl_clock != 0) {
		struct hci_memo *memo;

		memo = hci_memo_new(link->hl_unit, &link->hl_bdaddr);
		if (memo != NULL)
			memo->clock_offset = link->hl_clock;
	}

	crit_enter();
	TAILQ_REMOVE(&link->hl_unit->hci_links, link, hl_next);
	crit_exit();
	kfree(link, M_BLUETOOTH);
}

/*
 * Lookup HCI link by type and state.
 */
struct hci_link *
hci_link_lookup_state(struct hci_unit *unit, uint16_t type, uint16_t state)
{
	struct hci_link *link;

	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
		if (link->hl_type == type && link->hl_state == state)
			break;
	}

	return link;
}

/*
 * Lookup HCI link by address and type. Note that for SCO links there may
 * be more than one link per address, so we only return links with no
 * handle (ie new links)
 */
struct hci_link *
hci_link_lookup_bdaddr(struct hci_unit *unit, bdaddr_t *bdaddr, uint16_t type)
{
	struct hci_link *link;

	KKASSERT(unit != NULL);
	KKASSERT(bdaddr != NULL);

	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
		if (link->hl_type != type)
			continue;

		if (type == HCI_LINK_SCO && link->hl_handle != 0)
			continue;

		if (bdaddr_same(&link->hl_bdaddr, bdaddr))
			break;
	}

	return link;
}

struct hci_link *
hci_link_lookup_handle(struct hci_unit *unit, uint16_t handle)
{
	struct hci_link *link;

	KKASSERT(unit != NULL);

	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
		if (handle == link->hl_handle)
			break;
	}

	return link;
}