/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */
#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson <daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");
/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
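/*
 * Worked example (with the default alignment of 4 used below):
 * PAD_POW2(4, 4) == 0 and PAD_POW2(5, 4) == 3, i.e. the macro yields
 * the number of bytes needed to round x up to the next multiple of
 * pow, where pow must be a power of two.
 */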
/*
 * HSI padding options.
 * Warning: must be a power of 2 (& operation used) and cannot be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
/*
 * HSI link layer flow-control thresholds.
 * Warning: A high threshold value might increase throughput but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");
/*
 * Threshold values for the HSI packet queue. Flow control will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK   hsi_low_threshold
#define HIGH_WATER_MARK  hsi_high_threshold
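/*
 * With the default thresholds this means: cfhsi_xmit() signals flow OFF
 * to the CAIF stack once more than 100 packets are queued, and
 * cfhsi_tx_done_work() signals flow ON again only after the queue has
 * drained to 50 packets or fewer.
 */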
static LIST_HEAD(cfhsi_list);
static spinlock_t cfhsi_list_lock;
static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}
static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = skb_dequeue(&cfhsi->qhead);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
	spin_unlock_bh(&cfhsi->lock);
}
static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
	if (ret) {
		dev_warn(&cfhsi->ndev->dev,
			"%s: can't wake up HSI interface: %d.\n",
			__func__, ret);
		return ret;
	}

	do {
		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy);
		if (ret) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->dev);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			dev_warn(&cfhsi->ndev->dev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	return ret;
}
static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = skb_dequeue(&cfhsi->qhead);
	if (!skb)
		return 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		if (!skb)
			skb = skb_dequeue(&cfhsi->qhead);
		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB. */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	skb = skb_peek(&cfhsi->qhead);
	if (skb)
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}
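/*
 * Sketch of the TX buffer layout produced above (field names are those
 * of struct cfhsi_desc in caif_hsi.h):
 *
 *	+----------------------+ <- tx_buf / desc
 *	| header (piggy flag)  |
 *	| offset               | -> start of embedded frame, if any
 *	| cffrm_len[]          | little-endian payload frame lengths
 *	| emb_frm[]            | optional embedded CAIF frame
 *	+----------------------+ <- emb_frm + CFHSI_MAX_EMB_FRM_SZ
 *	| payload CAIF frames  | each: head padding | frame | tail padding
 *	+----------------------+
 */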
static void cfhsi_tx_done_work(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_desc *desc = NULL;
	int len;
	int res;

	cfhsi = container_of(work, struct cfhsi, tx_done_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	desc = (struct cfhsi_desc *)cfhsi->tx_buf;

	do {
		/*
		 * Send flow on if flow off has been previously signalled
		 * and number of packets is below low water mark.
		 */
		spin_lock_bh(&cfhsi->lock);
		if (cfhsi->flow_off_sent &&
				cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
				cfhsi->cfdev.flowctrl) {
			cfhsi->flow_off_sent = 0;
			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
		}
		spin_unlock_bh(&cfhsi->lock);

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->timer,
					jiffies + CFHSI_INACTIVITY_TOUT);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
		}
	} while (res < 0);
}
static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	queue_work(cfhsi->wq, &cfhsi->tx_done_work);
}
static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return 0;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0, retries = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Allocate SKB (GFP_KERNEL may sleep; we run on the RX
		 * work queue, i.e. process context). */
		skb = alloc_skb(len + 1, GFP_KERNEL);
		while (!skb) {
			retries++;
			schedule_timeout_uninterruptible(1);
			skb = alloc_skb(len + 1, GFP_KERNEL);
			if (skb) {
				printk(KERN_WARNING "%s: slept for %u "
						"before getting memory\n",
						__func__, retries);
				break;
			}
			if (retries > HZ) {
				printk(KERN_ERR "%s: slept for 1HZ and "
						"did not get memory\n",
						__func__);
				cfhsi->ndev->stats.rx_dropped++;
				goto drop_frame;
			}
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are called from an arch-specific platform device.
		 * Unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

drop_frame:
	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if (xfer_sz % 4) {
		dev_err(&cfhsi->ndev->dev,
				"%s: Invalid payload len: %d, ignored.\n",
				__func__, xfer_sz);
		xfer_sz = 0;
	}

	return xfer_sz;
}
static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len = 0, retries = 0;

		if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
				__func__);
			return -EPROTO;
		}

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Allocate SKB (GFP_KERNEL may sleep; we run on the RX
		 * work queue, i.e. process context). */
		skb = alloc_skb(len + 1, GFP_KERNEL);
		while (!skb) {
			retries++;
			schedule_timeout_uninterruptible(1);
			skb = alloc_skb(len + 1, GFP_KERNEL);
			if (skb) {
				printk(KERN_WARNING "%s: slept for %u "
						"before getting memory\n",
						__func__, retries);
				break;
			}
			if (retries > HZ) {
				printk(KERN_ERR "%s: slept for 1HZ "
						"and did not get memory\n",
						__func__);
				cfhsi->ndev->stats.rx_dropped++;
				goto drop_frame;
			}
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called from a platform device,
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

drop_frame:
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}
static void cfhsi_rx_done_work(struct work_struct *work)
{
	int res;
	int desc_pld_len = 0;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_desc *desc = NULL;

	cfhsi = container_of(work, struct cfhsi, rx_done_work);
	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	dev_dbg(&cfhsi->ndev->dev, "%s: Kick timer if pending.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);

	if (cfhsi->rx_state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
	} else {
		int pld_len;

		pld_len = cfhsi_rx_pld(desc, cfhsi);

		if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
			struct cfhsi_desc *piggy_desc;
			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						pld_len);

			/* Extract piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor in the start.
			 */
			memcpy((u8 *)desc, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
		}
	}

	if (desc_pld_len) {
		cfhsi->rx_state = CFHSI_RX_STATE_PAYLOAD;
		cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
		cfhsi->rx_len = desc_pld_len;
	} else {
		cfhsi->rx_state = CFHSI_RX_STATE_DESC;
		cfhsi->rx_ptr = cfhsi->rx_buf;
		cfhsi->rx_len = CFHSI_DESC_SZ;
	}
	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);

	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
			__func__);
		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
				cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}
}
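/*
 * In short, RX alternates between two states: in CFHSI_RX_STATE_DESC a
 * bare descriptor of CFHSI_DESC_SZ bytes is read and parsed by
 * cfhsi_rx_desc(); if it announces payload, the state switches to
 * CFHSI_RX_STATE_PAYLOAD and the payload (plus an optional piggy-backed
 * descriptor) is read next, after which the cycle starts over.
 */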
static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	set_bit(CFHSI_PENDING_RX, &cfhsi->bits);

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		queue_work(cfhsi->wq, &cfhsi->rx_done_work);
}
static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* It happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);

	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKEUP_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	} else if (!ret) {
		/* Wakeup timeout. */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	}
	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
		__func__);

	/* Set power-up bit and clear the pending wake-up request. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	if (!test_bit(CFHSI_PENDING_RX, &cfhsi->bits)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
			__func__);
		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr,
				cfhsi->rx_len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
		}
	}

	/* Clear power up acknowledgement. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queue is not empty. */
	if (!skb_peek(&cfhsi->qhead)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->timer,
				jiffies + CFHSI_INACTIVITY_TOUT);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		dev_err(&cfhsi->ndev->dev,
				"%s: Failed to create HSI frame: %d.\n",
				__func__, len);
	}
}
static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Check if there is something in FIFO. */
	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy)))
		fifo_occupancy = 0;

	if (fifo_occupancy) {
		dev_dbg(&cfhsi->ndev->dev,
				"%s: %u words in RX FIFO, restart timer.\n",
				__func__, (unsigned) fifo_occupancy);
		spin_lock_bh(&cfhsi->lock);
		mod_timer(&cfhsi->timer,
				jiffies + CFHSI_INACTIVITY_TOUT);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	/* Cancel pending RX requests. */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Deactivate wake line. */
	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKEUP_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits),
					ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		/* Timeout. */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);
	}

	/* Clear power down acknowledgement. */
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Check if there is something in FIFO. */
	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy)))
		fifo_occupancy = 0;

	if (fifo_occupancy) {
		dev_dbg(&cfhsi->ndev->dev,
				"%s: %u words in RX FIFO, wakeup forced.\n",
				__func__, (unsigned) fifo_occupancy);
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	} else
		dev_dbg(&cfhsi->ndev->dev, "%s: Done.\n",
			__func__);
}
static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}
static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}
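/*
 * Taken together, power management is a simple handshake: the initiator
 * raises the wake line (cfhsi_wake_up) and waits for CFHSI_WAKE_UP_ACK
 * (set by cfhsi_wake_up_cb); after CFHSI_INACTIVITY_TOUT with an empty
 * FIFO the host drops the wake line again (cfhsi_wake_down) and waits
 * for CFHSI_WAKE_DOWN_ACK (set by cfhsi_wake_down_cb).
 */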
static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active = 0;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	spin_lock_bh(&cfhsi->lock);

	skb_queue_tail(&cfhsi->qhead, skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	spin_unlock_bh(&cfhsi->lock);

	if (!start_xfer)
		return 0;

	/* Delete inactivity timer if started. */
#ifdef CONFIG_SMP
	timer_active = del_timer_sync(&cfhsi->timer);
#else
	timer_active = del_timer(&cfhsi->timer);
#endif /* CONFIG_SMP */

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		BUG_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}
static int cfhsi_open(struct net_device *dev)
{
	netif_wake_queue(dev);

	return 0;
}

static int cfhsi_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}

static const struct net_device_ops cfhsi_ops = {
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};
static void cfhsi_setup(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);

	dev->netdev_ops = &cfhsi_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	skb_queue_head_init(&cfhsi->qhead);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
}
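/*
 * Note on the setup above: tx_queue_len is 0 because the driver keeps
 * its own queue (cfhsi->qhead) and throttles the CAIF stack through the
 * flowctrl callback rather than through the generic netdev queue.
 */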
int cfhsi_probe(struct platform_device *pdev)
{
	struct cfhsi *cfhsi = NULL;
	struct net_device *ndev;
	struct cfhsi_dev *dev;
	int res;

	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
	if (!ndev) {
		dev_err(&pdev->dev, "%s: alloc_netdev failed.\n",
			__func__);
		return -ENODEV;
	}

	cfhsi = netdev_priv(ndev);
	cfhsi->ndev = ndev;
	cfhsi->pdev = pdev;

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state = CFHSI_RX_STATE_DESC;

	/* Set flow info. */
	cfhsi->flow_off_sent = 0;
	cfhsi->q_low_mark = LOW_WATER_MARK;
	cfhsi->q_high_mark = HIGH_WATER_MARK;

	/* Assign the HSI device. */
	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	cfhsi->dev = dev;

	/* Assign the driver to this HSI device. */
	dev->drv = &cfhsi->drv;

	/*
	 * Allocate a TX buffer with the size of an HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		dev_err(&ndev->dev, "%s: Failed to allocate TX buffer.\n",
			__func__);
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		dev_err(&ndev->dev, "%s: Failed to allocate RX buffer.\n",
			__func__);
		res = -ENODEV;
		goto err_alloc_rx;
	}

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->rx_done_work, cfhsi_rx_done_work);
	INIT_WORK(&cfhsi->tx_done_work, cfhsi_tx_done_work);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(pdev->name);
	if (!cfhsi->wq) {
		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	init_timer(&cfhsi->timer);
	cfhsi->timer.data = (unsigned long)cfhsi;
	cfhsi->timer.function = cfhsi_inactivity_tout;

	/* Add CAIF HSI device to list. */
	spin_lock(&cfhsi_list_lock);
	list_add_tail(&cfhsi->list, &cfhsi_list);
	spin_unlock(&cfhsi_list_lock);

	/* Activate HSI interface. */
	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
	if (res) {
		dev_err(&cfhsi->ndev->dev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO. */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;

	/* Register network device. */
	res = register_netdev(ndev);
	if (res) {
		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	netif_stop_queue(ndev);

	return res;

 err_net_reg:
	cfhsi->dev->cfhsi_down(cfhsi->dev);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	free_netdev(ndev);

	return res;
}
static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
{
	u8 *tx_buf, *rx_buf;

	/* Stop TXing. */
	netif_tx_stop_all_queues(cfhsi->ndev);

	/* going to shutdown driver */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	if (remove_platform_dev) {
		/* Flush workqueue */
		flush_workqueue(cfhsi->wq);

		/* Notify device. */
		platform_device_unregister(cfhsi->pdev);
	}

	/* Flush workqueue */
	flush_workqueue(cfhsi->wq);

	/* Delete timer if pending */
#ifdef CONFIG_SMP
	del_timer_sync(&cfhsi->timer);
#else
	del_timer(&cfhsi->timer);
#endif /* CONFIG_SMP */

	/* Cancel pending RX request (if any) */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Flush again and destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;

	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->dev->cfhsi_down(cfhsi->dev);

	/* Finally unregister the network device. */
	unregister_netdev(cfhsi->ndev);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
}
int cfhsi_remove(struct platform_device *pdev)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_dev *dev;

	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		/* Find the corresponding device. */
		if (cfhsi->dev == dev) {
			/* Remove from list. */
			list_del(list_node);
			spin_unlock(&cfhsi_list_lock);

			/* Shutdown driver. */
			cfhsi_shutdown(cfhsi, false);

			return 0;
		}
	}
	spin_unlock(&cfhsi_list_lock);
	return -ENODEV;
}
struct platform_driver cfhsi_plat_drv = {
	.probe = cfhsi_probe,
	.remove = cfhsi_remove,
	.driver = {
		.name = "cfhsi",
		.owner = THIS_MODULE,
	},
};
static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;

	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);

		/* Remove from list. */
		list_del(list_node);
		spin_unlock(&cfhsi_list_lock);

		/* Shutdown driver. */
		cfhsi_shutdown(cfhsi, true);

		spin_lock(&cfhsi_list_lock);
	}
	spin_unlock(&cfhsi_list_lock);

	/* Unregister platform driver. */
	platform_driver_unregister(&cfhsi_plat_drv);
}
static int __init cfhsi_init_module(void)
{
	int result;

	/* Initialize spin lock. */
	spin_lock_init(&cfhsi_list_lock);

	/* Register platform driver. */
	result = platform_driver_register(&cfhsi_plat_drv);
	if (result) {
		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
			result);
		goto err_dev_register;
	}

	return result;

 err_dev_register:
	return result;
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);