/*
 * drivers/net/wan/dscc4/dscc4.c: a DSCC4 HDLC driver for Linux
 *
 * This software may be used and distributed according to the terms of the
 * GNU General Public License.
 *
 * The author may be reached as romieu@cogenit.fr.
 * Specific bug reports/asian food will be welcome.
 *
 * Special thanks to the nice people at CS-Telecom for the hardware and the
 * access to the test/measure tools.
 *
 * I. Board Compatibility
 *
 * This device driver is designed for the Siemens PEB20534 4-port serial
 * controller as found on Etinc PCISYNC cards. The documentation for the
 * chipset is available at http://www.infineon.com:
 * - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with
 *   4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1";
 * - Application Hint "Management of DSCC4 on-chip FIFO resources".
 * - Errata sheet DS5 (courtesy of Michael Skerritt).
 * Jens David has built an adapter based on the same chipset. Take a look
 * at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific
 * driver.
 * Sample code (2 revisions) is available at Infineon.
 *
 * II. Board-specific settings
 *
 * Pcisync can transmit a clock signal to the outside world on the
 * *first two* ports provided you put a quartz and a line driver on them and
 * remove the jumpers. The operation is described on the Etinc web site. If
 * you go DCE on these ports, don't forget to use an adequate cable.
 *
 * Sharing of the PCI interrupt line for this board is possible.
 *
 * III. Driver operation
 *
 * The rx/tx operations are based on a linked list of descriptors. The driver
 * doesn't use HOLD mode any more. HOLD mode is definitely buggy and the more
 * I tried to fix it, the more it started to look like a (convoluted) software
 * mutation of the LxDA method. Errata sheet DS5 suggests using LxDA: consider
 * this an RFC 2119 MUST.
 *
 * When the tx ring is full, the xmit routine issues a call to netdev_stop.
 * The device is supposed to be enabled again during an ALLS irq (we could
 * use HI but as it's easy to lose events, it's fscked).
 *
 * Received frames aren't supposed to span over multiple receive areas.
 * I may implement it some day but it isn't the highest ranked item.
 *
 * The current error (XDU, RFO) recovery code is untested.
 * So far, RDO takes its RX channel down and the right sequence to enable it
 * again is still a mystery. If RDO happens, plan a reboot. More details
 * in the code (NB: as this happens, TX still works).
 * Don't mess with the cables during operation, especially on DTE ports. I
 * don't suggest it for DCE either, but at least one gets some messages
 * instead of a complete instant freeze.
 * Tests are done on Rev. 20 of the silicon. The RDO handling changes with
 * the documentation/chipset releases.
 *
 * TODO:
 * - use polling at high irq/s,
 * - performance analysis,
 *
 * 2001/12/10	Daniela Squassoni <daniela@cyclades.com>
 * - Contribution to support the new generic HDLC layer.
 *
 * - old style interface removal
 * - dscc4_release_ring fix (related to DMA mapping)
 * - hard_start_xmit fix (hint: TxSizeMax)
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/init.h>
#include <linux/string.h>

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/mutex.h>
static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 Exp $ for Linux\n";

#ifdef CONFIG_DSCC4_PCI_RST
static DEFINE_MUTEX(dscc4_mutex);
static u32 dscc4_pci_config_store[16];
#endif

#define DRV_NAME	"dscc4"

static int debug;
static int quartz;

/* Module parameters */

MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
MODULE_LICENSE("GPL");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Enable/disable extra messages");
module_param(quartz, int, 0);
MODULE_PARM_DESC(quartz, "If present, on-board quartz frequency (Hz)");
	u32 jiffies; /* Allows sizeof(TxFD) == sizeof(RxFD) + extra hack */
		     /* FWIW, datasheet calls that "dummy" and says that card
		      * never looks at it; neither does the driver */
#define DUMMY_SKB_SIZE		64

#define TX_RING_SIZE		32
#define RX_RING_SIZE		32
#define TX_TOTAL_SIZE		TX_RING_SIZE*sizeof(struct TxFD)
#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct RxFD)
#define IRQ_RING_SIZE		64	/* Keep it a multiple of 32 */
#define TX_TIMEOUT		(HZ/10)
#define DSCC4_HZ_MAX		33000000
#define BRR_DIVIDER_MAX		64*0x00004000	/* Cf errata DS5 p.10 */
#define dev_per_card		4
#define SCC_REGISTERS_MAX	23	/* Cf errata DS5 p.4 */

#define SOURCE_ID(flags)	(((flags) >> 28) & 0x03)
#define TO_SIZE(state)		(((state) >> 16) & 0x1fff)

/*
 * Given the operating range of Linux HDLC, the 2 defines below could be
 * made simpler. However they are a fine reminder for the limitations of
 * the driver: it's better to stay < TxSizeMax and < RxSizeMax.
 */
#define TO_STATE_TX(len)	cpu_to_le32(((len) & TxSizeMax) << 16)
#define TO_STATE_RX(len)	cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
#define RX_MAX(len)		((((len) >> 5) + 1) << 5)	/* Cf RLCR */
#define SCC_REG_START(dpriv)	(SCC_START+(dpriv->dev_id)*SCC_OFFSET)
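
/*
 * Worked example (added for illustration): RX_MAX() rounds a length up to
 * the next 32-byte boundary, e.g. RX_MAX(1600) = ((1600 >> 5) + 1) << 5 =
 * 1632. TO_STATE_TX() simply places the (TxSizeMax-clamped) length in bits
 * 16..28 of the descriptor state word, so TO_STATE_TX(1500) yields
 * cpu_to_le32(1500 << 16).
 */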
struct dscc4_pci_priv {
	struct pci_dev *pdev;
	struct dscc4_dev_priv *root;
	dma_addr_t iqcfg_dma;
};

struct dscc4_dev_priv {
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];

	volatile u32 tx_current;
	volatile u32 tx_dirty;

	dma_addr_t tx_fd_dma;
	dma_addr_t rx_fd_dma;

	u32 scc_regs[SCC_REGISTERS_MAX]; /* Cf errata DS5 p.4 */

	struct timer_list timer;

	struct dscc4_pci_priv *pci_priv;

	unsigned short encoding;
	unsigned short parity;
	struct net_device *dev;
	sync_serial_settings settings;
	void __iomem *base_addr;
	u32 __pad __attribute__ ((aligned (4)));
};
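
/*
 * Note added for clarity: tx_current/tx_dirty (and the rx_current/rx_dirty
 * counters used below) are free-running counters; every descriptor access
 * reduces them modulo the ring size, e.g.
 * "dpriv->tx_fd + (dpriv->tx_current % TX_RING_SIZE)", so the counters
 * themselves are never explicitly wrapped back to zero.
 */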
/* GLOBAL registers definitions */

/* SCC registers definitions */
#define SCC_START	0x0100
#define SCC_OFFSET	0x80

#define GPDATA		0x0404

#define EncodingMask	0x00700000
#define CrcMask		0x00000003

#define IntRxScc0	0x10000000
#define IntTxScc0	0x01000000

#define TxPollCmd	0x00000400
#define RxActivate	0x08000000
#define MTFi		0x04000000
#define Rdr		0x00400000
#define Rdt		0x00200000
#define Idr		0x00100000
#define Idt		0x00080000
#define TxSccRes	0x01000000
#define RxSccRes	0x00010000
#define TxSizeMax	0x1fff		/* Datasheet DS1 - 11.1.1.1 */
#define RxSizeMax	0x1ffc		/* Datasheet DS1 - 11.1.2.1 */

#define Ccr0ClockMask	0x0000003f
#define Ccr1LoopMask	0x00000200
#define IsrMask		0x000fffff
#define BrrExpMask	0x00000f00
#define BrrMultMask	0x0000003f
#define EncodingMask	0x00700000
#define Hold		cpu_to_le32(0x40000000)
#define SccBusy		0x10000000
#define PowerUp		0x80000000
#define Vis		0x00001000
#define FrameOk		(FrameVfr | FrameCrc)
#define FrameVfr	0x80
#define FrameRdo	0x40
#define FrameCrc	0x20
#define FrameRab	0x10
#define FrameAborted	cpu_to_le32(0x00000200)
#define FrameEnd	cpu_to_le32(0x80000000)
#define DataComplete	cpu_to_le32(0x40000000)
#define LengthCheck	0x00008000
#define SccEvt		0x02000000
#define NoAck		0x00000200
#define Action		0x00000001
#define HiDesc		cpu_to_le32(0x20000000)

#define RxEvt		0xf0000000
#define TxEvt		0x0f000000
#define Alls		0x00040000
#define Xdu		0x00010000
#define Cts		0x00004000
#define Xmr		0x00002000
#define Xpr		0x00001000
#define Rdo		0x00000080
#define Rfs		0x00000040
#define Cd		0x00000004
#define Rfo		0x00000002
#define Flex		0x00000001

/* DMA core events */
#define Cfg		0x00200000
#define Hi		0x00040000
#define Fi		0x00020000
#define Err		0x00010000
#define Arf		0x00000002
#define ArAck		0x00000001

#define Ready		0x00000000
#define NeedIDR		0x00000001
#define NeedIDT		0x00000002
#define RdoSet		0x00000004
#define FakeReset	0x00000008
/* Don't mask RDO. Ever. */
#ifdef DSCC4_POLLING
#define EventsMask	0xfffeef7f
#else
#define EventsMask	0xfffa8f7a
#endif
/* Function prototypes */
static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr);
static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
static int dscc4_open(struct net_device *);
static netdev_tx_t dscc4_start_xmit(struct sk_buff *, struct net_device *);
static int dscc4_close(struct net_device *);
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int dscc4_init_ring(struct net_device *);
static void dscc4_release_ring(struct dscc4_dev_priv *);
static void dscc4_timer(unsigned long);
static void dscc4_tx_timeout(struct net_device *);
static irqreturn_t dscc4_irq(int irq, void *dev_id);
static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *);
static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}

static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p)
{
	return p->dev;
}
static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, int offset)
{
	u32 state;

	/* Cf scc_writel for concern regarding thread-safety */
	state = dpriv->scc_regs[offset >> 2];
	state &= ~mask;
	state |= value;
	dpriv->scc_regs[offset >> 2] = state;
	writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}
static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, int offset)
{
	/*
	 * As of 2002/02/16, there are no threads racing for access.
	 */
	dpriv->scc_regs[offset >> 2] = bits;
	writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
}
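
/*
 * Usage sketch (both calls appear verbatim in the open/close paths below):
 * scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0) sets the PowerUp and Vis
 * bits while preserving the rest of CCR0, and
 * scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0) clears them again.
 * scc_writel() simply replaces the whole cached register value.
 */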
static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset)
{
	return dpriv->scc_regs[offset >> 2];
}
static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	/* Cf errata DS5 p.4 */
	readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
	return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
}
static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv,
			       struct net_device *dev)
{
	dpriv->ltda = dpriv->tx_fd_dma +
		      ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD);
	writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
	/* Flush posted writes *NOW* */
	readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
}
static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv,
				   struct net_device *dev)
{
	dpriv->lrda = dpriv->rx_fd_dma +
		      ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD);
	writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
}
static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)
{
	return dpriv->tx_current == dpriv->tx_dirty;
}

static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
					      struct net_device *dev)
{
	return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
}
static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, const char *msg)
{
	if (SOURCE_ID(state) != dpriv->dev_id) {
		printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
		       dev->name, msg, SOURCE_ID(state), state);
	}
	if (state & 0x0df80c00) {
		printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
		       dev->name, msg, state);
	}
}
static void dscc4_tx_print(struct net_device *dev,
			   struct dscc4_dev_priv *dpriv,
			   char *msg)
{
	printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
	       dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
}
static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
{
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct TxFD *tx_fd = dpriv->tx_fd;
	struct RxFD *rx_fd = dpriv->rx_fd;
	struct sk_buff **skbuff;
	int i;

	pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);

	skbuff = dpriv->tx_skbuff;
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (*skbuff) {
			pci_unmap_single(pdev, le32_to_cpu(tx_fd->data),
					 (*skbuff)->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(*skbuff);
		}
		skbuff++, tx_fd++;
	}

	skbuff = dpriv->rx_skbuff;
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (*skbuff) {
			pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
					 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
			dev_kfree_skb(*skbuff);
		}
		skbuff++, rx_fd++;
	}
}
static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
				 struct net_device *dev)
{
	unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
	struct RxFD *rx_fd = dpriv->rx_fd + dirty;
	const int len = RX_MAX(HDLC_MAX_MRU);
	struct sk_buff *skb;

	skb = dev_alloc_skb(len);
	dpriv->rx_skbuff[dirty] = skb;
	if (!skb)
		return -1;
	skb->protocol = hdlc_type_trans(skb, dev);
	rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
				  skb->data, len, PCI_DMA_FROMDEVICE));
	return 0;
}
/*
 * IRQ/thread/whatever safe
 */
static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
			      struct net_device *dev, char *msg)
{
	int i = 0;

	do {
		if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
			printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
			       msg, i);
			goto done;
		}
		schedule_timeout_uninterruptible(10);
	} while (++i > 0);
	printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
done:
	return (i >= 0) ? i : -EAGAIN;
}
static int dscc4_do_action(struct net_device *dev, char *msg)
{
	void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
	int i = 0;

	writel(Action, ioaddr + GCMDR);
	ioaddr += GSTAR;
	do {
		u32 state = readl(ioaddr);

		if (state & ArAck) {
			printk(KERN_DEBUG "%s: %s ack\n", dev->name, msg);
			writel(ArAck, ioaddr);
			goto done;
		} else if (state & Arf) {
			printk(KERN_ERR "%s: %s failed\n", dev->name, msg);
			writel(Arf, ioaddr);
			i = -1;
			goto done;
		}
	} while (++i > 0);
	printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
done:
	return i;
}
static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
{
	int cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	int i = 0;

	do {
		if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
		    (dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
			break;
		schedule_timeout_uninterruptible(10);
	} while (++i > 0);

	return (i >= 0) ? i : -EAGAIN;
}
/* TODO: (ab)use this function to refill a completely depleted RX ring. */
static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
				struct net_device *dev)
{
	struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct sk_buff *skb;
	int pkt_len;

	skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
	if (!skb) {
		printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
		goto refill;
	}
	pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
	pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
			 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
	if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pkt_len;
		skb_put(skb, pkt_len);
		if (netif_running(dev))
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_rx(skb);
	} else {
		if (skb->data[pkt_len] & FrameRdo)
			dev->stats.rx_fifo_errors++;
		else if (!(skb->data[pkt_len] & FrameCrc))
			dev->stats.rx_crc_errors++;
		else if ((skb->data[pkt_len] & (FrameVfr | FrameRab)) !=
			 (FrameVfr | FrameRab))
			dev->stats.rx_length_errors++;
		dev->stats.rx_errors++;
		dev_kfree_skb_irq(skb);
	}
refill:
	while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
		if (try_get_rx_skb(dpriv, dev) < 0)
			break;
		dpriv->rx_dirty++;
	}
	dscc4_rx_update(dpriv, dev);
	rx_fd->state2 = 0x00000000;
	rx_fd->end = cpu_to_le32(0xbabeface);
}
*pdev
)
652 struct dscc4_pci_priv
*ppriv
;
653 struct dscc4_dev_priv
*root
;
656 ppriv
= pci_get_drvdata(pdev
);
659 for (i
= 0; i
< dev_per_card
; i
++)
660 unregister_hdlc_device(dscc4_to_dev(root
+ i
));
662 pci_set_drvdata(pdev
, NULL
);
664 for (i
= 0; i
< dev_per_card
; i
++)
665 free_netdev(root
[i
].dev
);
static int __devinit dscc4_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct dscc4_pci_priv *priv;
	struct dscc4_dev_priv *dpriv;
	void __iomem *ioaddr;
	u32 bits;
	int i, rc;

	printk(KERN_DEBUG "%s", version);

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	rc = pci_request_region(pdev, 0, "registers");
	if (rc < 0) {
		printk(KERN_ERR "%s: can't reserve MMIO region (regs)\n",
		       DRV_NAME);
		goto err_disable_0;
	}
	rc = pci_request_region(pdev, 1, "LBI interface");
	if (rc < 0) {
		printk(KERN_ERR "%s: can't reserve MMIO region (lbi)\n",
		       DRV_NAME);
		goto err_free_mmio_region_1;
	}

	ioaddr = pci_ioremap_bar(pdev, 0);
	if (!ioaddr) {
		printk(KERN_ERR "%s: cannot remap MMIO region %llx @ %llx\n",
		       DRV_NAME, (unsigned long long)pci_resource_len(pdev, 0),
		       (unsigned long long)pci_resource_start(pdev, 0));
		rc = -EIO;
		goto err_free_mmio_regions_2;
	}
	printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n",
	       (unsigned long long)pci_resource_start(pdev, 0),
	       (unsigned long long)pci_resource_start(pdev, 1), pdev->irq);

	/* Cf errata DS5 p.2 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
	pci_set_master(pdev);

	rc = dscc4_found1(pdev, ioaddr);
	if (rc < 0)
		goto err_iounmap_3;

	priv = pci_get_drvdata(pdev);

	rc = request_irq(pdev->irq, dscc4_irq, IRQF_SHARED, DRV_NAME, priv->root);
	if (rc < 0) {
		printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
		goto err_release_4;
	}

	/* power up/little endian/dma core controlled via lrda/ltda */
	writel(0x00000001, ioaddr + GMODE);
	/* Shared interrupt queue */
	bits = (IRQ_RING_SIZE >> 5) - 1;
	writel(bits, ioaddr + IQLENR0);

	/* Global interrupt queue */
	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
	priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
		IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
	if (!priv->iqcfg)
		goto err_free_irq_5;
	writel(priv->iqcfg_dma, ioaddr + IQCFG);

	/*
	 * SCC 0-3 private rx/tx irq structures
	 * IQRX/TXi needs to be set soon. Learned it the hard way...
	 */
	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
		if (!dpriv->iqtx)
			goto err_free_iqtx_6;
		writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
	}
	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
		if (!dpriv->iqrx)
			goto err_free_iqrx_7;
		writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
	}

	/* Cf application hint. Beware of hard-lock condition on threshold. */
	writel(0x42104000, ioaddr + FIFOCR1);
	//writel(0x9ce69800, ioaddr + FIFOCR2);
	writel(0xdef6d800, ioaddr + FIFOCR2);
	//writel(0x11111111, ioaddr + FIFOCR4);
	writel(0x18181818, ioaddr + FIFOCR4);
	writel(0x0000000e, ioaddr + FIFOCR3);

	writel(0xff200001, ioaddr + GCMDR);

	rc = 0;
out:
	return rc;

err_free_iqrx_7:
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
	}
	i = dev_per_card;
err_free_iqtx_6:
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
			    priv->iqcfg_dma);
err_free_irq_5:
	free_irq(pdev->irq, priv->root);
err_release_4:
	dscc4_free1(pdev);
err_iounmap_3:
	iounmap(ioaddr);
err_free_mmio_regions_2:
	pci_release_region(pdev, 1);
err_free_mmio_region_1:
	pci_release_region(pdev, 0);
err_disable_0:
	pci_disable_device(pdev);
	goto out;
}
/*
 * Let's hope the default values are decent enough to protect my
 * feet from the user's gun - Ueimor
 */
static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
				 struct net_device *dev)
{
	/* No interrupts, SCC core disabled. Let's relax */
	scc_writel(0x00000000, dpriv, dev, CCR0);

	scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);

	scc_writel(0x02408000, dpriv, dev, CCR1);

	/* crc not forwarded - Cf errata DS5 p.11 */
	scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
	//scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2);
}
static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
{
	int ret = 0;

	if ((hz < 0) || (hz > DSCC4_HZ_MAX))
		ret = -EOPNOTSUPP;

	dpriv->pci_priv->xtal_hz = hz;

	return ret;
}
static const struct net_device_ops dscc4_ops = {
	.ndo_open	= dscc4_open,
	.ndo_stop	= dscc4_close,
	.ndo_change_mtu	= hdlc_change_mtu,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= dscc4_ioctl,
	.ndo_tx_timeout	= dscc4_tx_timeout,
};
static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	int i, ret = -ENOMEM;

	root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
	if (!root) {
		printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
		goto err_out;
	}

	for (i = 0; i < dev_per_card; i++) {
		root[i].dev = alloc_hdlcdev(root + i);
		if (!root[i].dev)
			goto err_free_dev;
	}

	ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
		goto err_free_dev;
	}

	ppriv->root = root;
	spin_lock_init(&ppriv->lock);

	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv = root + i;
		struct net_device *d = dscc4_to_dev(dpriv);
		hdlc_device *hdlc = dev_to_hdlc(d);

		d->base_addr = (unsigned long)ioaddr;
		d->netdev_ops = &dscc4_ops;
		d->watchdog_timeo = TX_TIMEOUT;
		SET_NETDEV_DEV(d, &pdev->dev);

		dpriv->dev_id = i;
		dpriv->pci_priv = ppriv;
		dpriv->base_addr = ioaddr;
		spin_lock_init(&dpriv->lock);

		hdlc->xmit = dscc4_start_xmit;
		hdlc->attach = dscc4_hdlc_attach;

		dscc4_init_registers(dpriv, d);
		dpriv->parity = PARITY_CRC16_PR0_CCITT;
		dpriv->encoding = ENCODING_NRZ;

		ret = dscc4_init_ring(d);
		if (ret < 0)
			goto err_unregister;

		ret = register_hdlc_device(d);
		if (ret < 0) {
			printk(KERN_ERR "%s: unable to register\n", DRV_NAME);
			dscc4_release_ring(dpriv);
			goto err_unregister;
		}
	}

	ret = dscc4_set_quartz(root, quartz);
	if (ret < 0)
		goto err_unregister;

	pci_set_drvdata(pdev, ppriv);
	return ret;

err_unregister:
	while (i-- > 0) {
		dscc4_release_ring(root + i);
		unregister_hdlc_device(dscc4_to_dev(root + i));
	}
	kfree(ppriv);
	i = dev_per_card;
err_free_dev:
	while (i-- > 0)
		free_netdev(root[i].dev);
	kfree(root);
err_out:
	return ret;
}
static void dscc4_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	// struct dscc4_pci_priv *ppriv;

	dpriv->timer.expires = jiffies + TX_TIMEOUT;
	add_timer(&dpriv->timer);
}
static void dscc4_tx_timeout(struct net_device *dev)
{
}
static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
{
	sync_serial_settings *settings = &dpriv->settings;

	if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
		struct net_device *dev = dscc4_to_dev(dpriv);

		printk(KERN_INFO "%s: loopback requires clock\n", dev->name);
		return -1;
	}
	return 0;
}
#ifdef CONFIG_DSCC4_PCI_RST
/*
 * Some DSCC4-based cards wire the GPIO port and the PCI #RST pin together
 * so as to provide a safe way to reset the ASIC without resetting the whole
 * machine.
 * This code doesn't need to be efficient. Keep It Simple.
 */
static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
{
	int i;

	mutex_lock(&dscc4_mutex);
	for (i = 0; i < 16; i++)
		pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);

	/* Maximal LBI clock divider (who cares ?) and whole GPIO range. */
	writel(0x001c0000, ioaddr + GMODE);
	/* Configure GPIO port as output */
	writel(0x0000ffff, ioaddr + GPDIR);
	/* Disable interrupts */
	writel(0x0000ffff, ioaddr + GPIM);

	writel(0x0000ffff, ioaddr + GPDATA);
	writel(0x00000000, ioaddr + GPDATA);

	/* Flush posted writes */
	readl(ioaddr + GSTAR);

	schedule_timeout_uninterruptible(10);

	for (i = 0; i < 16; i++)
		pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
	mutex_unlock(&dscc4_mutex);
}
#else
#define dscc4_pci_reset(pdev,ioaddr)	do {} while (0)
#endif /* CONFIG_DSCC4_PCI_RST */
static int dscc4_open(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct dscc4_pci_priv *ppriv;
	int ret = -EAGAIN;

	if ((dscc4_loopback_check(dpriv) < 0))
		goto err;

	if ((ret = hdlc_open(dev)))
		goto err;

	ppriv = dpriv->pci_priv;

	/*
	 * Due to various bugs, there is no way to reliably reset a
	 * specific port (manufacturer-dependent special PCI #RST wiring
	 * apart: it affects all ports). Thus the device goes in the best
	 * silent mode possible at dscc4_close() time and simply claims to
	 * be up if it's opened again. It still isn't possible to change
	 * the HDLC configuration without rebooting but at least the ports
	 * can be up/down ifconfig'ed without killing the host.
	 */
	if (dpriv->flags & FakeReset) {
		dpriv->flags &= ~FakeReset;
		scc_patchl(0, PowerUp, dpriv, dev, CCR0);
		scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
		scc_writel(EventsMask, dpriv, dev, IMR);
		printk(KERN_INFO "%s: up again.\n", dev->name);
		goto done;
	}

	/* IDT+IDR during XPR */
	dpriv->flags = NeedIDR | NeedIDT;

	scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0);

	/*
	 * The following is a bit paranoid...
	 *
	 * NB: the datasheet "...CEC will stay active if the SCC is in
	 * power-down mode or..." and CCR2.RAC = 1 are two different
	 * situations.
	 */
	if (scc_readl_star(dpriv, dev) & SccBusy) {
		printk(KERN_ERR "%s busy. Try later\n", dev->name);
		ret = -EAGAIN;
		goto err_out;
	} else
		printk(KERN_INFO "%s: available. Good\n", dev->name);

	scc_writel(EventsMask, dpriv, dev, IMR);

	/* Posted write is flushed in the wait_ack loop */
	scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);

	if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
		goto err_disable_scc_events;

	/*
	 * I would expect XPR near CE completion (before ? after ?).
	 * At worst, this code won't see a late XPR and people
	 * will have to re-issue an ifconfig (this is harmless).
	 * WARNING, a really missing XPR usually means a hardware
	 * reset is needed. Suggestions anyone ?
	 */
	if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
		printk(KERN_ERR "%s: %s timeout\n", DRV_NAME, "XPR");
		goto err_disable_scc_events;
	}

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Open");

done:
	netif_start_queue(dev);

	init_timer(&dpriv->timer);
	dpriv->timer.expires = jiffies + 10*HZ;
	dpriv->timer.data = (unsigned long)dev;
	dpriv->timer.function = dscc4_timer;
	add_timer(&dpriv->timer);
	netif_carrier_on(dev);

	return 0;

err_disable_scc_events:
	scc_writel(0xffffffff, dpriv, dev, IMR);
	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
err_out:
	hdlc_close(dev);
err:
	return ret;
}
#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
}
#endif /* DSCC4_POLLING */
static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
	struct TxFD *tx_fd;
	int next;

	next = dpriv->tx_current%TX_RING_SIZE;
	dpriv->tx_skbuff[next] = skb;
	tx_fd = dpriv->tx_fd + next;
	tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
	tx_fd->data = cpu_to_le32(pci_map_single(ppriv->pdev, skb->data, skb->len,
						 PCI_DMA_TODEVICE));
	tx_fd->complete = 0x00000000;
	tx_fd->jiffies = jiffies;

#ifdef DSCC4_POLLING
	spin_lock(&dpriv->lock);
	while (dscc4_tx_poll(dpriv, dev));
	spin_unlock(&dpriv->lock);
#endif

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Xmit");
	/* To be cleaned(unsigned int)/optimized. Later, ok ? */
	if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
		netif_stop_queue(dev);

	if (dscc4_tx_quiescent(dpriv, dev))
		dscc4_do_tx(dpriv, dev);

	return NETDEV_TX_OK;
}
static int dscc4_close(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

	del_timer_sync(&dpriv->timer);
	netif_stop_queue(dev);

	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
	scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
	scc_writel(0xffffffff, dpriv, dev, IMR);

	dpriv->flags |= FakeReset;

	hdlc_close(dev);

	return 0;
}
static inline int dscc4_check_clock_ability(int port)
{
	int ret = 0;

#ifdef CONFIG_DSCC4_PCISYNC
	if (port >= 2)
		ret = -1;
#endif
	return ret;
}
static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	u32 brr;
	int ret = -1;

	*state &= ~Ccr0ClockMask;
	if (*bps) { /* Clock generated - required for DCE */
		u32 n = 0, m = 0, divider;
		int xtal;

		xtal = dpriv->pci_priv->xtal_hz;
		if (!xtal)
			goto done;
		if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
			goto done;
		divider = xtal / *bps;
		if (divider > BRR_DIVIDER_MAX) {
			divider >>= 4;
			*state |= 0x00000036; /* Clock mode 6b (BRG/16) */
		} else
			*state |= 0x00000037; /* Clock mode 7b (BRG) */
		if (divider >> 22) {
			n = 63;
			m = 15;
		} else if (divider) {
			/* Extraction of the 6 highest weighted bits */
			m = 0;
			while (0xffffffc0 & divider) {
				m++;
				divider >>= 1;
			}
			n = divider;
		}
		brr = (m << 8) | n;
		divider = n << m;
		if (!(*state & 0x00000001)) /* ?b mode mask => clock mode 6b */
			divider <<= 4;
		*bps = xtal / divider;
	} else {
		/*
		 * External clock - DTE
		 * "state" already reflects Clock mode 0a (CCR0 = 0xzzzzzz00).
		 * Nothing more to be done
		 */
		brr = 0;
	}
	scc_writel(brr, dpriv, dev, BRR);
	ret = 0;
done:
	return ret;
}
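
/*
 * Worked example (added; the figures are illustrative, not from the original
 * source): with a 14745600 Hz quartz and a requested rate of 9600 bps,
 * divider = 1536. Extracting the 6 highest weighted bits gives m = 5 and
 * n = 48, hence BRR = (5 << 8) | 48, and the rate actually programmed is
 * 14745600 / (48 << 5) = 9600 bps exactly.
 */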
static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	const size_t size = sizeof(dpriv->settings);
	int ret = 0;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return -EOPNOTSUPP;

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(line, &dpriv->settings, size))
			return -EFAULT;
		break;

	case IF_IFACE_SYNC_SERIAL:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dpriv->flags & FakeReset) {
			printk(KERN_INFO "%s: please reset the device"
			       " before this command\n", dev->name);
			return -EPERM;
		}
		if (copy_from_user(&dpriv->settings, line, size))
			return -EFAULT;
		ret = dscc4_set_iface(dpriv, dev);
		break;

	default:
		ret = hdlc_ioctl(dev, ifr, cmd);
		break;
	}

	return ret;
}
struct thingie {
	int define;
	u32 bits;
};

static int dscc4_match(struct thingie *p, int value)
{
	int i;

	for (i = 0; p[i].define != -1; i++) {
		if (value == p[i].define)
			break;
	}
	if (p[i].define == -1)
		return -1;
	else
		return i;
}
static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv,
			       struct net_device *dev)
{
	sync_serial_settings *settings = &dpriv->settings;
	int ret = -EOPNOTSUPP;
	u32 bps, state;

	bps = settings->clock_rate;
	state = scc_readl(dpriv, CCR0);
	if (dscc4_set_clock(dev, &bps, &state) < 0)
		goto done;
	if (bps) { /* DCE */
		printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
		if (settings->clock_rate != bps) {
			printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
			       dev->name, settings->clock_rate, bps);
			settings->clock_rate = bps;
		}
	} else { /* DTE */
		state |= PowerUp | Vis;
		printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
	}
	scc_writel(state, dpriv, dev, CCR0);
	ret = 0;
done:
	return ret;
}
static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
				  struct net_device *dev)
{
	struct thingie encoding[] = {
		{ ENCODING_NRZ,		0x00000000 },
		{ ENCODING_NRZI,	0x00200000 },
		{ ENCODING_FM_MARK,	0x00400000 },
		{ ENCODING_FM_SPACE,	0x00500000 },
		{ ENCODING_MANCHESTER,	0x00600000 },
		{ -1, 0}
	};
	int i, ret = 0;

	i = dscc4_match(encoding, dpriv->encoding);
	if (i >= 0)
		scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
	else
		ret = -EOPNOTSUPP;
	return ret;
}
static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
				  struct net_device *dev)
{
	sync_serial_settings *settings = &dpriv->settings;
	u32 state;

	state = scc_readl(dpriv, CCR1);
	if (settings->loopback) {
		printk(KERN_DEBUG "%s: loopback\n", dev->name);
		state |= 0x00000100;
	} else {
		printk(KERN_DEBUG "%s: normal\n", dev->name);
		state &= ~0x00000100;
	}
	scc_writel(state, dpriv, dev, CCR1);
	return 0;
}
static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
			     struct net_device *dev)
{
	struct thingie crc[] = {
		{ PARITY_CRC16_PR0_CCITT,	0x00000010 },
		{ PARITY_CRC16_PR1_CCITT,	0x00000000 },
		{ PARITY_CRC32_PR0_CCITT,	0x00000011 },
		{ PARITY_CRC32_PR1_CCITT,	0x00000001 },
		{ -1, 0}
	};
	int i, ret = 0;

	i = dscc4_match(crc, dpriv->parity);
	if (i >= 0)
		scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
	else
		ret = -EOPNOTSUPP;
	return ret;
}
static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	struct {
		int (*action)(struct dscc4_dev_priv *, struct net_device *);
	} *p, do_setting[] = {
		{ dscc4_encoding_setting },
		{ dscc4_clock_setting },
		{ dscc4_loopback_setting },
		{ dscc4_crc_setting },
		{ NULL }
	};
	int ret = 0;

	for (p = do_setting; p->action; p++) {
		if ((ret = p->action(dpriv, dev)) < 0)
			break;
	}
	return ret;
}
static irqreturn_t dscc4_irq(int irq, void *token)
{
	struct dscc4_dev_priv *root = token;
	struct dscc4_pci_priv *priv;
	struct net_device *dev;
	void __iomem *ioaddr;
	u32 state;
	unsigned long flags;
	int i, handled = 0;

	priv = root->pci_priv;
	dev = dscc4_to_dev(root);

	spin_lock_irqsave(&priv->lock, flags);

	ioaddr = root->base_addr;

	state = readl(ioaddr + GSTAR);
	if (!state)
		goto out;
	handled = 1;
	if (debug > 3)
		printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state);
	writel(state, ioaddr + GSTAR);

	if (state & Arf) {
		printk(KERN_ERR "%s: failure (Arf). Harass the maintainer\n",
		       DRV_NAME);
		goto out;
	}
	state &= ~ArAck;
	if (state & Cfg) {
		if (debug > 0)
			printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
		if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf))
			printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
		if (!(state &= ~Cfg))
			goto out;
	}
	if (state & RxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_rx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~RxEvt;
	}
	if (state & TxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_tx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~TxEvt;
	}
out:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_RETVAL(handled);
}
static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
			 struct dscc4_dev_priv *dpriv)
{
	struct net_device *dev = dscc4_to_dev(dpriv);
	u32 state;
	int cur, loop = 0;

try:
	cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	state = le32_to_cpu(dpriv->iqtx[cur]);
	if (!state) {
		if (debug > 4)
			printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
			       state);
		if ((debug > 1) && (loop > 1))
			printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
		if (loop && netif_queue_stopped(dev))
			if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
				netif_wake_queue(dev);

		if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
		    !dscc4_tx_done(dpriv))
			dscc4_do_tx(dpriv, dev);
		return;
	}
	loop++;
	dpriv->iqtx[cur] = 0;
	dpriv->iqtx_current++;

	if (state_check(state, dpriv, dev, "Tx") < 0)
		return;

	if (state & SccEvt) {
		if (state & Alls) {
			struct TxFD *tx_fd;
			struct sk_buff *skb;

			if (debug > 2)
				dscc4_tx_print(dev, dpriv, "Alls");
			/*
			 * DataComplete can't be trusted for Tx completion.
			 */
			cur = dpriv->tx_dirty%TX_RING_SIZE;
			tx_fd = dpriv->tx_fd + cur;
			skb = dpriv->tx_skbuff[cur];
			if (skb) {
				pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data),
						 skb->len, PCI_DMA_TODEVICE);
				if (tx_fd->state & FrameEnd) {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += skb->len;
				}
				dev_kfree_skb_irq(skb);
				dpriv->tx_skbuff[cur] = NULL;
				++dpriv->tx_dirty;
			} else {
				printk(KERN_ERR "%s Tx: NULL skb %d\n",
				       dev->name, cur);
			}
			/*
			 * If the driver ends sending crap on the wire, it
			 * will be way easier to diagnose than the (not so)
			 * random freeze induced by null sized tx frames.
			 */
			tx_fd->data = tx_fd->next;
			tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
			tx_fd->complete = 0x00000000;

			if (!(state &= ~Alls))
				goto try;
		}
		/*
		 * Transmit Data Underrun
		 */
		if (state & Xdu) {
			printk(KERN_ERR "%s: XDU. Ask maintainer\n", DRV_NAME);
			dpriv->flags = NeedIDT;
			writel(MTFi | Rdt,
			       dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
			writel(Action, dpriv->base_addr + GCMDR);
			return;
		}
		if (state & Cts) {
			printk(KERN_INFO "%s: CTS transition\n", dev->name);
			if (!(state &= ~Cts)) /* DEBUG */
				goto try;
		}
		if (state & Xmr) {
			printk(KERN_ERR "%s: Xmr. Ask maintainer\n", DRV_NAME);
			if (!(state &= ~Xmr)) /* DEBUG */
				goto try;
		}
		if (state & Xpr) {
			void __iomem *scc_addr;
			unsigned long ring;
			int i;

			/*
			 * - the busy condition happens (sometimes);
			 * - it doesn't seem to make the handler unreliable.
			 */
			for (i = 1; i; i <<= 1) {
				if (!(scc_readl_star(dpriv, dev) & SccBusy))
					break;
			}
			if (!i)
				printk(KERN_INFO "%s busy in irq\n", dev->name);

			scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
			/* Keep this order: IDT before IDR */
			if (dpriv->flags & NeedIDT) {
				if (debug > 2)
					dscc4_tx_print(dev, dpriv, "Xpr");
				ring = dpriv->tx_fd_dma +
				       (dpriv->tx_dirty%TX_RING_SIZE)*
				       sizeof(struct TxFD);
				writel(ring, scc_addr + CH0BTDA);
				dscc4_do_tx(dpriv, dev);
				writel(MTFi | Idt, scc_addr + CH0CFG);
				if (dscc4_do_action(dev, "IDT") < 0)
					goto err_xpr;
				dpriv->flags &= ~NeedIDT;
			}
			if (dpriv->flags & NeedIDR) {
				ring = dpriv->rx_fd_dma +
				       (dpriv->rx_current%RX_RING_SIZE)*
				       sizeof(struct RxFD);
				writel(ring, scc_addr + CH0BRDA);
				dscc4_rx_update(dpriv, dev);
				writel(MTFi | Idr, scc_addr + CH0CFG);
				if (dscc4_do_action(dev, "IDR") < 0)
					goto err_xpr;
				dpriv->flags &= ~NeedIDR;
				/* Activate receiver and misc */
				scc_writel(0x08050008, dpriv, dev, CCR2);
			}
		err_xpr:
			if (!(state &= ~Xpr))
				goto try;
		}
		if (state & Cd) {
			printk(KERN_INFO "%s: CD transition\n", dev->name);
			if (!(state &= ~Cd)) /* DEBUG */
				goto try;
		}
	} else { /* ! SccEvt */
		if (state & Hi) {
#ifdef DSCC4_POLLING
			while (!dscc4_tx_poll(dpriv, dev));
#endif
			printk(KERN_INFO "%s: Tx Hi\n", dev->name);
			state &= ~Hi;
		}
		if (state & Err) {
			printk(KERN_INFO "%s: Tx ERR\n", dev->name);
			dev->stats.tx_errors++;
			state &= ~Err;
		}
	}
	goto try;
}
static void dscc4_rx_irq(struct dscc4_pci_priv *priv,
			 struct dscc4_dev_priv *dpriv)
{
	struct net_device *dev = dscc4_to_dev(dpriv);
	u32 state;
	int cur;

try:
	cur = dpriv->iqrx_current%IRQ_RING_SIZE;
	state = le32_to_cpu(dpriv->iqrx[cur]);
	if (!state)
		return;
	dpriv->iqrx[cur] = 0;
	dpriv->iqrx_current++;

	if (state_check(state, dpriv, dev, "Rx") < 0)
		return;

	if (!(state & SccEvt)){
		struct RxFD *rx_fd;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name,
			       state);
		state &= 0x00ffffff;
		if (state & Err) { /* Hold or reset */
			printk(KERN_DEBUG "%s: Rx ERR\n", dev->name);
			cur = dpriv->rx_current%RX_RING_SIZE;
			rx_fd = dpriv->rx_fd + cur;
			/*
			 * Presume we're not facing a DMAC receiver reset.
			 * As We use the rx size-filtering feature of the
			 * DSCC4, the beginning of a new frame is waiting in
			 * the rx fifo. I bet a Receive Data Overflow will
			 * happen most of time but let's try and avoid it.
			 * Btw (as for RDO) if one experiences ERR whereas
			 * the system looks rather idle, there may be a
			 * problem with latency. In this case, increasing
			 * RX_RING_SIZE may help.
			 */
			//while (dpriv->rx_needs_refill) {
			while (!(rx_fd->state1 & Hold)) {
				rx_fd++;
				cur++;
				if (!(cur = cur%RX_RING_SIZE))
					rx_fd = dpriv->rx_fd;
			}
			//dpriv->rx_needs_refill--;
			try_get_rx_skb(dpriv, dev);
			rx_fd->state1 &= ~Hold;
			rx_fd->state2 = 0x00000000;
			rx_fd->end = cpu_to_le32(0xbabeface);
			goto try;
		}
		if (state & Fi) {
			dscc4_rx_skb(dpriv, dev);
			goto try;
		}
		if (state & Hi) { /* HI bit */
			printk(KERN_INFO "%s: Rx Hi\n", dev->name);
			state &= ~Hi;
			goto try;
		}
	} else { /* SccEvt */
		if (debug > 1) {
			struct {
				u32 mask;
				const char *irq_name;
			} evts[] = {
				{ 0x00008000, "TIN"},
				{ 0x00000020, "RSC"},
				{ 0x00000010, "PCE"},
				{ 0x00000008, "PLLA"},
				{ 0, NULL}
			}, *evt;

			for (evt = evts; evt->irq_name; evt++) {
				if (state & evt->mask) {
					printk(KERN_DEBUG "%s: %s\n",
					       dev->name, evt->irq_name);
					if (!(state &= ~evt->mask))
						goto try;
				}
			}
		} else {
			if (!(state &= ~0x0000c03c))
				goto try;
		}
		if (state & Cts) {
			printk(KERN_INFO "%s: CTS transition\n", dev->name);
			if (!(state &= ~Cts)) /* DEBUG */
				goto try;
		}
		if (state & Rdo) {
			struct RxFD *rx_fd;
			void __iomem *scc_addr;
			int cur;

			// dscc4_rx_dump(dpriv);
			scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;

			scc_patchl(RxActivate, 0, dpriv, dev, CCR2);
			/*
			 * This has no effect. Why ?
			 * ORed with TxSccRes, one sees the CFG ack (for
			 * the TX part only).
			 */
			scc_writel(RxSccRes, dpriv, dev, CMDR);
			dpriv->flags |= RdoSet;

			/*
			 * Let's try and save something in the received data.
			 * rx_current must be incremented at least once to
			 * avoid HOLD in the BRDA-to-be-pointed desc.
			 */
			do {
				cur = dpriv->rx_current++%RX_RING_SIZE;
				rx_fd = dpriv->rx_fd + cur;
				if (!(rx_fd->state2 & DataComplete))
					break;
				if (rx_fd->state2 & FrameAborted) {
					dev->stats.rx_over_errors++;
					rx_fd->state1 |= Hold;
					rx_fd->state2 = 0x00000000;
					rx_fd->end = cpu_to_le32(0xbabeface);
				} else
					dscc4_rx_skb(dpriv, dev);
			} while (1);

			if (dpriv->flags & RdoSet)
				printk(KERN_DEBUG
				       "%s: no RDO in Rx data\n", DRV_NAME);

#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
#warning "FIXME: CH0BRDA"
			writel(dpriv->rx_fd_dma +
			       (dpriv->rx_current%RX_RING_SIZE)*
			       sizeof(struct RxFD), scc_addr + CH0BRDA);
			writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "RDR") < 0) {
				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
				       dev->name, "RDR");
			}
			writel(MTFi|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "IDR") < 0) {
				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
				       dev->name, "IDR");
			}
#endif
			scc_patchl(0, RxActivate, dpriv, dev, CCR2);
			goto try;
		}
		if (state & Cd) {
			printk(KERN_INFO "%s: CD transition\n", dev->name);
			if (!(state &= ~Cd)) /* DEBUG */
				goto try;
		}
		if (state & Flex) {
			printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
			if (!(state &= ~Flex))
				goto try;
		}
	}
}
/*
 * I had expected the following to work for the first descriptor
 * (tx_fd->state = 0xc0000000)
 * - Hold=1 (don't try and branch to the next descriptor);
 * - No=0 (I want an empty data section, i.e. size=0);
 * - Fe=1 (required by No=0 or we got an Err irq and must reset).
 * It failed and locked solid. Thus the introduction of a dummy skb.
 * Problem is acknowledged in errata sheet DS5. Joy :o/
 */
static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(DUMMY_SKB_SIZE);
	if (skb) {
		int last = dpriv->tx_dirty%TX_RING_SIZE;
		struct TxFD *tx_fd = dpriv->tx_fd + last;

		skb->len = DUMMY_SKB_SIZE;
		skb_copy_to_linear_data(skb, version,
					strlen(version) % DUMMY_SKB_SIZE);
		tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
		tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
					skb->data, DUMMY_SKB_SIZE,
					PCI_DMA_TODEVICE));
		dpriv->tx_skbuff[last] = skb;
	}
	return skb;
}
static int dscc4_init_ring(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct TxFD *tx_fd;
	struct RxFD *rx_fd;
	void *ring;
	int i;

	ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma);
	if (!ring)
		goto err_out;
	dpriv->rx_fd = rx_fd = (struct RxFD *) ring;

	ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma);
	if (!ring)
		goto err_free_dma_rx;
	dpriv->tx_fd = tx_fd = (struct TxFD *) ring;

	memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE);
	dpriv->tx_dirty = 0xffffffff;
	i = dpriv->tx_current = 0;
	do {
		tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
		tx_fd->complete = 0x00000000;
		tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma);
		(tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma +
					(++i%TX_RING_SIZE)*sizeof(*tx_fd));
	} while (i < TX_RING_SIZE);

	if (!dscc4_init_dummy_skb(dpriv))
		goto err_free_dma_tx;

	memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE);
	i = dpriv->rx_dirty = dpriv->rx_current = 0;
	do {
		/* size set by the host. Multiple of 4 bytes please */
		rx_fd->state1 = HiDesc;
		rx_fd->state2 = 0x00000000;
		rx_fd->end = cpu_to_le32(0xbabeface);
		rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);
		if (try_get_rx_skb(dpriv, dev) >= 0)
			dpriv->rx_dirty++;
		(rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma +
					(++i%RX_RING_SIZE)*sizeof(*rx_fd));
	} while (i < RX_RING_SIZE);

	return 0;

err_free_dma_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma);
err_free_dma_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
err_out:
	return -ENOMEM;
}
static void __devexit dscc4_remove_one(struct pci_dev *pdev)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	void __iomem *ioaddr;
	int i;

	ppriv = pci_get_drvdata(pdev);
	root = ppriv->root;

	ioaddr = root->base_addr;

	dscc4_pci_reset(pdev, ioaddr);

	free_irq(pdev->irq, root);
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
			    ppriv->iqcfg_dma);
	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv = root + i;

		dscc4_release_ring(dpriv);
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}

	dscc4_free1(pdev);

	iounmap(ioaddr);

	pci_release_region(pdev, 1);
	pci_release_region(pdev, 0);

	pci_disable_device(pdev);
}

static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding,
			     unsigned short parity)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI &&
	    encoding != ENCODING_FM_MARK &&
	    encoding != ENCODING_FM_SPACE &&
	    encoding != ENCODING_MANCHESTER)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT &&
	    parity != PARITY_CRC32_PR0_CCITT &&
	    parity != PARITY_CRC32_PR1_CCITT)
		return -EINVAL;

	dpriv->encoding = encoding;
	dpriv->parity = parity;
	return 0;
}

static int __init dscc4_setup(char *str)
{
	int *args[] = { &debug, &quartz, NULL }, **p = args;

	while (*p && (get_option(&str, *p) == 2))
		p++;
	return 1;
}

__setup("dscc4.setup=", dscc4_setup);

static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
	{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
	  PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);

static struct pci_driver dscc4_driver = {
	.name		= DRV_NAME,
	.id_table	= dscc4_pci_tbl,
	.probe		= dscc4_init_one,
	.remove		= __devexit_p(dscc4_remove_one),
};

static int __init dscc4_init_module(void)
{
	return pci_register_driver(&dscc4_driver);
}

static void __exit dscc4_cleanup_module(void)
{
	pci_unregister_driver(&dscc4_driver);
}

module_init(dscc4_init_module);
module_exit(dscc4_cleanup_module);