/*
 * ibm_emac_mal.c
 *
 *      Armin Kuster akuster@mvista.com
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/ocp.h>

#include "ibm_emac_mal.h"
// Locking: Should we share a lock with the client? The client could provide
// a lock pointer (optionally) in the commac structure... I don't think this
// is really necessary, though.

/* This lock protects the commac list. On today's UP implementations, it's
 * really only used as IRQ protection in mal_{register,unregister}_commac().
 */
static rwlock_t mal_list_lock = RW_LOCK_UNLOCKED;
int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
        unsigned long flags;

        write_lock_irqsave(&mal_list_lock, flags);

        /* Don't let multiple commacs claim the same channel */
        if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
            (mal->rx_chan_mask & commac->rx_chan_mask)) {
                write_unlock_irqrestore(&mal_list_lock, flags);
                return -EBUSY;
        }

        mal->tx_chan_mask |= commac->tx_chan_mask;
        mal->rx_chan_mask |= commac->rx_chan_mask;

        list_add(&commac->list, &mal->commac);

        write_unlock_irqrestore(&mal_list_lock, flags);

        return 0;
}
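/*
 * Hypothetical usage sketch (client code, not part of this file): an EMAC
 * client fills in a struct mal_commac with its callback ops and channel
 * masks, then registers it so the interrupt handlers below can route
 * per-channel events to it.  The names and the mask layout (MSB = channel
 * 0) are illustrative assumptions:
 *
 *      dev->commac.ops          = &emac_commac_ops;
 *      dev->commac.dev          = dev;
 *      dev->commac.tx_chan_mask = 0x80000000 >> tx_chan;
 *      dev->commac.rx_chan_mask = 0x80000000 >> rx_chan;
 *      if (mal_register_commac(mal, &dev->commac))
 *              return -EBUSY;          // channel already claimed
 */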
int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
        unsigned long flags;

        write_lock_irqsave(&mal_list_lock, flags);

        mal->tx_chan_mask &= ~commac->tx_chan_mask;
        mal->rx_chan_mask &= ~commac->rx_chan_mask;

        list_del_init(&commac->list);

        write_unlock_irqrestore(&mal_list_lock, flags);

        return 0;
}
int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
{
        switch (channel) {
        case 0:
                set_mal_dcrn(mal, DCRN_MALRCBS0, size);
                break;
#ifdef DCRN_MALRCBS1
        case 1:
                set_mal_dcrn(mal, DCRN_MALRCBS1, size);
                break;
#endif
#ifdef DCRN_MALRCBS2
        case 2:
                set_mal_dcrn(mal, DCRN_MALRCBS2, size);
                break;
#endif
#ifdef DCRN_MALRCBS3
        case 3:
                set_mal_dcrn(mal, DCRN_MALRCBS3, size);
                break;
#endif
        default:
                return -EINVAL;
        }

        return 0;
}
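/*
 * Usage sketch: a client sets its receive channel buffer size before
 * enabling the channel, e.g.
 *
 *      mal_set_rcbs(mal, rx_chan, rx_buf_size);
 *
 * The units of "size" are whatever encoding the MALRCBSn DCR expects on
 * the given 4xx part (see the hardware manual); this function writes the
 * value through unmodified.
 */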
static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ibm_ocp_mal *mal = dev_instance;
        unsigned long mal_error;

        /*
         * This SERR applies to one of the devices on the MAL, here we charge
         * it against the first EMAC registered for the MAL.
         */

        mal_error = get_mal_dcrn(mal, DCRN_MALESR);

        printk(KERN_ERR "%s: System Error (MALESR=%lx)\n",
               "MAL" /* FIXME: get the name right */ , mal_error);

        /* FIXME: decipher error */
        /* FIXME: distribute to commacs, if possible */

        /* Clear the error status register */
        set_mal_dcrn(mal, DCRN_MALESR, mal_error);

        return IRQ_HANDLED;
}
static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ibm_ocp_mal *mal = dev_instance;
        struct list_head *l;
        unsigned long isr;

        isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR);
        set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr);

        read_lock(&mal_list_lock);
        list_for_each(l, &mal->commac) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);

                if (isr & mc->tx_chan_mask) {
                        mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask);
                }
        }
        read_unlock(&mal_list_lock);

        return IRQ_HANDLED;
}
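/*
 * Note on the read-then-write-back above: the MAL *EOBISR registers are,
 * per the 4xx manuals, write-one-to-clear.  Writing back the value just
 * read acknowledges exactly the bits observed at read time, so an event
 * raised between the read and the write sets a fresh bit and is not lost.
 * Each registered commac is then handed only the subset of bits matching
 * its own channel mask.
 */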
static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ibm_ocp_mal *mal = dev_instance;
        struct list_head *l;
        unsigned long isr;

        isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR);
        set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr);

        read_lock(&mal_list_lock);
        list_for_each(l, &mal->commac) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);

                if (isr & mc->rx_chan_mask) {
                        mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask);
                }
        }
        read_unlock(&mal_list_lock);

        return IRQ_HANDLED;
}
static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ibm_ocp_mal *mal = dev_instance;
        struct list_head *l;
        unsigned long deir;

        deir = get_mal_dcrn(mal, DCRN_MALTXDEIR);

        /* FIXME: print which MAL correctly */
        printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n",
               "MAL", deir);

        read_lock(&mal_list_lock);
        list_for_each(l, &mal->commac) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);

                if (deir & mc->tx_chan_mask) {
                        mc->ops->txde(mc->dev, deir & mc->tx_chan_mask);
                }
        }
        read_unlock(&mal_list_lock);

        return IRQ_HANDLED;
}
/*
 * This interrupt should be very rare at best.  It occurs when the hardware
 * has a problem with the receive descriptors.  The manual states that it
 * occurs when the hardware finds a receive descriptor whose empty bit is
 * not set.  The recovery mechanism will be to traverse through the
 * descriptors, handle any that are marked to be handled, and reinitialize
 * each along the way.  At that point the driver will be restarted.
 */
static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ibm_ocp_mal *mal = dev_instance;
        struct list_head *l;
        unsigned long deir;

        deir = get_mal_dcrn(mal, DCRN_MALRXDEIR);

        /*
         * This really is needed.  This case was encountered in stress
         * testing.
         */
        if (deir == 0)
                return IRQ_HANDLED;

        /* FIXME: print which MAL correctly */
        printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n",
               "MAL", deir);

        read_lock(&mal_list_lock);
        list_for_each(l, &mal->commac) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);

                if (deir & mc->rx_chan_mask) {
                        mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask);
                }
        }
        read_unlock(&mal_list_lock);

        return IRQ_HANDLED;
}
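/*
 * A sketch of the recovery walk described above, as a hypothetical client
 * rxde() callback (descriptor and helper names are assumptions, not this
 * file's API): scan the ring, consume any descriptor already marked done,
 * re-arm the rest by setting the empty bit, then re-enable the channel.
 *
 *      for (i = 0; i < NUM_RX_BUFF; i++) {
 *              if (!(desc[i].ctrl & MAL_RX_CTRL_EMPTY)) {
 *                      handle_or_drop(&desc[i]);
 *                      desc[i].ctrl = MAL_RX_CTRL_EMPTY |
 *                          (i == NUM_RX_BUFF - 1 ? MAL_RX_CTRL_WRAP : 0);
 *              }
 *      }
 *      reenable_rx_channel(mal, commac->rx_chan_mask);
 */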
static int __init mal_probe(struct ocp_device *ocpdev)
{
        struct ibm_ocp_mal *mal = NULL;
        struct ocp_func_mal_data *maldata;
        int err = 0;

        maldata = (struct ocp_func_mal_data *)ocpdev->def->additions;
        if (maldata == NULL) {
                printk(KERN_ERR "mal%d: Missing additional data!\n",
                       ocpdev->def->index);
                return -ENODEV;
        }

        mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
        if (mal == NULL) {
                printk(KERN_ERR
                       "mal%d: Out of memory allocating MAL structure!\n",
                       ocpdev->def->index);
                return -ENOMEM;
        }
        memset(mal, 0, sizeof(*mal));

        switch (ocpdev->def->index) {
        case 0:
                mal->dcrbase = DCRN_MAL_BASE;
                break;
#ifdef DCRN_MAL1_BASE
        case 1:
                mal->dcrbase = DCRN_MAL1_BASE;
                break;
#endif
        default:
                BUG();
        }
        /**************************/

        INIT_LIST_HEAD(&mal->commac);

        set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF);
        set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF);

        set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR);      /* 384 */
        /* FIXME: Add delay */

        /* Set the MAL configuration register */
        set_mal_dcrn(mal, DCRN_MALCR,
                     MALCR_PLBB | MALCR_OPBBL | MALCR_LEA |
                     MALCR_PLBLT_DEFAULT);

        /* It would be nice to allocate buffers separately for each
         * channel, but we can't because the channels share the upper
         * 13 bits of address lines.  Each channel's buffer must also
         * be 4k aligned, so we allocate 4k for each channel.  This is
         * inefficient.  FIXME: do better, if possible */
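        /*
         * Layout implied by the comment above (MAL_DT_ALIGN is the
         * per-channel stride from the header, assumed 4k): channel i's
         * descriptor table starts at tx_phys_addr + i * MAL_DT_ALIGN,
         * which is exactly what the per-channel MALTXCTPnR writes below
         * encode.  With e.g. 4 tx channels the single allocation is
         * 4 * MAL_DT_ALIGN = 16KiB.
         */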
        mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
                                               MAL_DT_ALIGN *
                                               maldata->num_tx_chans,
                                               &mal->tx_phys_addr, GFP_KERNEL);
        if (mal->tx_virt_addr == NULL) {
                printk(KERN_ERR
                       "mal%d: Out of memory allocating MAL descriptors!\n",
                       ocpdev->def->index);
                err = -ENOMEM;
                goto fail;
        }

        /* God, oh, god, I hate DCRs */
        set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr);
#ifdef DCRN_MALTXCTP1R
        if (maldata->num_tx_chans > 1)
                set_mal_dcrn(mal, DCRN_MALTXCTP1R,
                             mal->tx_phys_addr + MAL_DT_ALIGN);
#endif                          /* DCRN_MALTXCTP1R */
#ifdef DCRN_MALTXCTP2R
        if (maldata->num_tx_chans > 2)
                set_mal_dcrn(mal, DCRN_MALTXCTP2R,
                             mal->tx_phys_addr + 2 * MAL_DT_ALIGN);
#endif                          /* DCRN_MALTXCTP2R */
#ifdef DCRN_MALTXCTP3R
        if (maldata->num_tx_chans > 3)
                set_mal_dcrn(mal, DCRN_MALTXCTP3R,
                             mal->tx_phys_addr + 3 * MAL_DT_ALIGN);
#endif                          /* DCRN_MALTXCTP3R */
#ifdef DCRN_MALTXCTP4R
        if (maldata->num_tx_chans > 4)
                set_mal_dcrn(mal, DCRN_MALTXCTP4R,
                             mal->tx_phys_addr + 4 * MAL_DT_ALIGN);
#endif                          /* DCRN_MALTXCTP4R */
#ifdef DCRN_MALTXCTP5R
        if (maldata->num_tx_chans > 5)
                set_mal_dcrn(mal, DCRN_MALTXCTP5R,
                             mal->tx_phys_addr + 5 * MAL_DT_ALIGN);
#endif                          /* DCRN_MALTXCTP5R */
#ifdef DCRN_MALTXCTP6R
        if (maldata->num_tx_chans > 6)
                set_mal_dcrn(mal, DCRN_MALTXCTP6R,
                             mal->tx_phys_addr + 6 * MAL_DT_ALIGN);
#endif                          /* DCRN_MALTXCTP6R */
#ifdef DCRN_MALTXCTP7R
        if (maldata->num_tx_chans > 7)
                set_mal_dcrn(mal, DCRN_MALTXCTP7R,
                             mal->tx_phys_addr + 7 * MAL_DT_ALIGN);
#endif                          /* DCRN_MALTXCTP7R */
        mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
                                               MAL_DT_ALIGN *
                                               maldata->num_rx_chans,
                                               &mal->rx_phys_addr, GFP_KERNEL);
        if (mal->rx_virt_addr == NULL) {
                printk(KERN_ERR
                       "mal%d: Out of memory allocating MAL descriptors!\n",
                       ocpdev->def->index);
                err = -ENOMEM;
                goto fail;
        }

        set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr);
#ifdef DCRN_MALRXCTP1R
        if (maldata->num_rx_chans > 1)
                set_mal_dcrn(mal, DCRN_MALRXCTP1R,
                             mal->rx_phys_addr + MAL_DT_ALIGN);
#endif                          /* DCRN_MALRXCTP1R */
#ifdef DCRN_MALRXCTP2R
        if (maldata->num_rx_chans > 2)
                set_mal_dcrn(mal, DCRN_MALRXCTP2R,
                             mal->rx_phys_addr + 2 * MAL_DT_ALIGN);
#endif                          /* DCRN_MALRXCTP2R */
#ifdef DCRN_MALRXCTP3R
        if (maldata->num_rx_chans > 3)
                set_mal_dcrn(mal, DCRN_MALRXCTP3R,
                             mal->rx_phys_addr + 3 * MAL_DT_ALIGN);
#endif                          /* DCRN_MALRXCTP3R */
        err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
        if (err)
                goto fail;
        err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE", mal);
        if (err)
                goto fail;
        err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
        if (err)
                goto fail;
        err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
        if (err)
                goto fail;
        err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
        if (err)
                goto fail;
        set_mal_dcrn(mal, DCRN_MALIER,
                     MALIER_DE | MALIER_NE | MALIER_TE |
                     MALIER_OPBE | MALIER_PLBE);

        /* Advertise me to the rest of the world */
        ocp_set_drvdata(ocpdev, mal);

        printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n",
               ocpdev->def->index, maldata->num_tx_chans,
               maldata->num_rx_chans);

        return 0;

      fail:
        /* FIXME: dispose requested IRQs ! */
        kfree(mal);
        return err;
}
static void __exit mal_remove(struct ocp_device *ocpdev)
{
        struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
        struct ocp_func_mal_data *maldata = ocpdev->def->additions;

        BUG_ON(!maldata);

        ocp_set_drvdata(ocpdev, NULL);

        /* FIXME: shut down the MAL, deal with dependency with emac */
        free_irq(maldata->serr_irq, mal);
        free_irq(maldata->txde_irq, mal);
        free_irq(maldata->txeob_irq, mal);
        free_irq(maldata->rxde_irq, mal);
        free_irq(maldata->rxeob_irq, mal);

        if (mal->tx_virt_addr)
                dma_free_coherent(&ocpdev->dev,
                                  MAL_DT_ALIGN * maldata->num_tx_chans,
                                  mal->tx_virt_addr, mal->tx_phys_addr);

        if (mal->rx_virt_addr)
                dma_free_coherent(&ocpdev->dev,
                                  MAL_DT_ALIGN * maldata->num_rx_chans,
                                  mal->rx_virt_addr, mal->rx_phys_addr);

        kfree(mal);
}
/* Structure for a device driver */
static struct ocp_device_id mal_ids[] = {
        {.vendor = OCP_ANY_ID, .function = OCP_FUNC_MAL},
        {.vendor = OCP_VENDOR_INVALID}
};

static struct ocp_driver mal_driver = {
        .name = "mal",
        .id_table = mal_ids,
        .probe = mal_probe,
        .remove = mal_remove,
};
static int __init init_mals(void)
{
        int rc;

        rc = ocp_register_driver(&mal_driver);
        if (rc < 0) {
                ocp_unregister_driver(&mal_driver);
                return -ENODEV;
        }

        return 0;
}

static void __exit exit_mals(void)
{
        ocp_unregister_driver(&mal_driver);
}

module_init(init_mals);
module_exit(exit_mals);