/*
 *  Bluetooth HCI Three-wire UART driver
 *
 *  Copyright (C) 2012  Intel Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"
#define HCI_3WIRE_ACK_PKT 0
#define HCI_3WIRE_LINK_PKT 15

/* Sliding window size */
#define H5_TX_WIN_MAX 4

#define H5_ACK_TIMEOUT msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 *     4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)
/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))
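
/*
 * Taken together, the macros above describe the four-byte Three-wire
 * packet header: byte 0 carries the 3-bit sequence number, the 3-bit
 * acknowledgement number, the CRC-present bit and the reliable-packet
 * bit; byte 1 carries the packet type in its low nibble and the low
 * nibble of the 12-bit payload length; byte 2 carries the remaining
 * eight length bits; byte 3 is a checksum chosen so that the four
 * header bytes sum to 0xff (see h5_rx_3wire_hdr() and h5_prepare_pkt()).
 */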
#define SLIP_DELIMITER 0xc0
#define SLIP_ESC 0xdb
#define SLIP_ESC_DELIM 0xdc
#define SLIP_ESC_ESC 0xdd
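
/*
 * Packets are SLIP-framed on the wire: every packet starts and ends with
 * the 0xc0 delimiter, a literal 0xc0 in the payload is sent as the escape
 * pair 0xdb 0xdc, and a literal 0xdb is sent as 0xdb 0xdd (see
 * h5_slip_one_byte() and h5_unslip_one_byte() below).
 */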
/* H5 state flags */
enum {
	H5_RX_ESC,	/* SLIP escape mode */
	H5_TX_ACK_REQ,	/* Pending ack to send */
};
struct h5 {
	struct sk_buff_head unack;	/* Unack'ed packets queue */
	struct sk_buff_head rel;	/* Reliable packets queue */
	struct sk_buff_head unrel;	/* Unreliable packets queue */

	unsigned long flags;

	struct sk_buff *rx_skb;		/* Receive buffer */
	size_t rx_pending;		/* Expecting more bytes */
	u8 rx_ack;			/* Last ack number received */

	int (*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list timer;	/* Retransmission timer */

	u8 tx_seq;			/* Next seq number to send */
	u8 tx_ack;			/* Next ack number to send */
	u8 tx_win;			/* Sliding window size */

	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;
};
static void h5_reset_rx(struct h5 *h5);
static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;

	nskb = alloc_skb(3, GFP_ATOMIC);
	if (!nskb)
		return;

	hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;

	skb_put_data(nskb, data, len);

	skb_queue_tail(&h5->unrel, nskb);
}
static u8 h5_cfg_field(struct h5 *h5)
{
	/* Sliding window size (first 3 bits) */
	return h5->tx_win & 0x07;
}
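
/*
 * Timer callback: while the link is still being established this re-sends
 * the sync/config requests every H5_SYNC_TIMEOUT; once the link is active
 * it moves packets that have gone unacknowledged for H5_ACK_TIMEOUT back
 * onto the reliable queue so they get retransmitted.
 */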
static void h5_timed_event(unsigned long arg)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct h5 *h5 = hu->priv;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}
static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	BT_ERR("Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}
static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

	h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	hu->priv = h5;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	setup_timer(&h5->timer, h5_timed_event, (unsigned long)hu);

	h5->tx_win = H5_TX_WIN_MAX;

	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}
static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	del_timer_sync(&h5->timer);

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	kfree(h5);

	return 0;
}
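
/*
 * Remove packets from the unack'ed queue that the controller has now
 * acknowledged, judging by the ack number carried in the last received
 * header (h5->rx_ack).
 */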
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}
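
/*
 * Link-control (type 15) packets carry the Three-wire link establishment
 * handshake (sync request/response followed by config request/response)
 * as well as the sleep/wakeup messages.
 */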
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	hci_uart_tx_wakeup(hu);
}
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}
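
/*
 * Receive state machine: h5->rx_func steps through
 * h5_rx_delimiter -> h5_rx_pkt_start -> h5_rx_3wire_hdr -> h5_rx_payload
 * (and h5_rx_crc when the header announces a CRC), one byte at a time,
 * driven from h5_recv().
 */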
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}
static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_CRC(hdr)) {
		h5->rx_func = h5_rx_crc;
		h5->rx_pending = 2;
	} else {
		h5_complete_rx_pkt(hu);
	}

	return 0;
}
static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		BT_ERR("Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		BT_ERR("Out-of-order packet arrived (%u != %u)",
		       H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		BT_ERR("Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}
static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		BT_ERR("Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *)hu->hdev;

	return 0;
}
static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}
static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
		set_bit(H5_RX_ESC, &h5->flags);
		return;
	}

	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}
	}

	skb_put_data(h5->rx_skb, byte, 1);
	h5->rx_pending--;

	BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}
static void h5_reset_rx(struct h5 *h5)
{
	if (h5->rx_skb) {
		kfree_skb(h5->rx_skb);
		h5->rx_skb = NULL;
	}

	h5->rx_func = h5_rx_delimiter;
	h5->rx_pending = 0;
	clear_bit(H5_RX_ESC, &h5->flags);
}
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}
static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	if (skb->len > 0xfff) {
		BT_ERR("Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (h5->state != H5_ACTIVE) {
		BT_ERR("Ignoring HCI data in non-active state");
		kfree_skb(skb);
		return 0;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
		kfree_skb(skb);
		break;
	}

	return 0;
}
static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	skb_put_data(skb, &delim, 1);
}
static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
	const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
	const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

	switch (c) {
	case SLIP_DELIMITER:
		skb_put_data(skb, &esc_delim, 2);
		break;
	case SLIP_ESC:
		skb_put_data(skb, &esc_esc, 2);
		break;
	default:
		skb_put_data(skb, &c, 1);
	}
}
static bool valid_packet_type(u8 type)
{
	switch (type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
	case HCI_SCODATA_PKT:
	case HCI_3WIRE_LINK_PKT:
	case HCI_3WIRE_ACK_PKT:
		return true;
	default:
		return false;
	}
}
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}
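
/*
 * Transmit path: a pending wakeup is handled first, then unreliable
 * packets, then reliable packets (limited by the tx_win sliding window
 * and parked on the unack'ed queue until the peer acks them), and
 * finally a pure ack packet if one is due.
 */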
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;
		BT_DBG("Sending wakeup request");

		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}
static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);

	return 0;
}
static const struct hci_uart_proto h5p = {
	.id = HCI_UART_3WIRE,
	.name = "Three-wire (H5)",
	.open = h5_open,
	.close = h5_close,
	.recv = h5_recv,
	.enqueue = h5_enqueue,
	.dequeue = h5_dequeue,
	.flush = h5_flush,
};
int __init h5_init(void)
{
	return hci_uart_register_proto(&h5p);
}

int __exit h5_deinit(void)
{
	return hci_uart_unregister_proto(&h5p);
}