[PATCH] libertas: fixed transmission flow control on the mesh interface
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / ieee1394 / ohci1394.c
blob5dadfd296f79c4796e414e3de301a3c4f9714569
1 /*
2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
28 * . Iso Receive
29 * . DMA mmap for iso receive
30 * . Config ROM generation
32 * Things implemented, but still in test phase:
33 * . Iso Transmit
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
36 * Things not implemented:
37 * . DMA error recovery
39 * Known bugs:
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
45 * Acknowledgments:
47 * Adam J Richter <adam@yggdrasil.com>
48 * . Use of pci_class to find device
50 * Emilie Chung <emilie.chung@axis.com>
51 * . Tip on Async Request Filter
53 * Pascal Drolet <pascal.drolet@informission.ca>
54 * . Various tips for optimization and functionnalities
56 * Robert Ficklin <rficklin@westengineering.com>
57 * . Loop in irq_handler
59 * James Goodwin <jamesg@Filanet.com>
60 * . Various tips on initialization, self-id reception, etc.
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 * . Apple PowerBook detection
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 * . Reset the board properly before leaving + misc cleanups
68 * Leon van Stuivenberg <leonvs@iae.nl>
69 * . Bug fixes
71 * Ben Collins <bcollins@debian.org>
72 * . Working big-endian support
73 * . Updated to 2.4.x module scheme (PCI aswell)
74 * . Config ROM generation
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 * . Reworked code for initiating bus resets
78 * (long, short, with or without hold-off)
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 * . Added support for nVidia nForce2 onboard Firewire chipset
85 #include <linux/kernel.h>
86 #include <linux/list.h>
87 #include <linux/slab.h>
88 #include <linux/interrupt.h>
89 #include <linux/wait.h>
90 #include <linux/errno.h>
91 #include <linux/module.h>
92 #include <linux/moduleparam.h>
93 #include <linux/pci.h>
94 #include <linux/fs.h>
95 #include <linux/poll.h>
96 #include <asm/byteorder.h>
97 #include <asm/atomic.h>
98 #include <asm/uaccess.h>
99 #include <linux/delay.h>
100 #include <linux/spinlock.h>
102 #include <asm/pgtable.h>
103 #include <asm/page.h>
104 #include <asm/irq.h>
105 #include <linux/types.h>
106 #include <linux/vmalloc.h>
107 #include <linux/init.h>
109 #ifdef CONFIG_PPC_PMAC
110 #include <asm/machdep.h>
111 #include <asm/pmac_feature.h>
112 #include <asm/prom.h>
113 #include <asm/pci-bridge.h>
114 #endif
116 #include "csr1212.h"
117 #include "ieee1394.h"
118 #include "ieee1394_types.h"
119 #include "hosts.h"
120 #include "dma.h"
121 #include "iso.h"
122 #include "ieee1394_core.h"
123 #include "highlevel.h"
124 #include "ohci1394.h"
126 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
127 #define OHCI1394_DEBUG
128 #endif
130 #ifdef DBGMSG
131 #undef DBGMSG
132 #endif
134 #ifdef OHCI1394_DEBUG
135 #define DBGMSG(fmt, args...) \
136 printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
137 #else
138 #define DBGMSG(fmt, args...) do {} while (0)
139 #endif
141 #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
142 #define OHCI_DMA_ALLOC(fmt, args...) \
143 HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
144 ++global_outstanding_dmas, ## args)
145 #define OHCI_DMA_FREE(fmt, args...) \
146 HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
147 --global_outstanding_dmas, ## args)
148 static int global_outstanding_dmas = 0;
149 #else
150 #define OHCI_DMA_ALLOC(fmt, args...) do {} while (0)
151 #define OHCI_DMA_FREE(fmt, args...) do {} while (0)
152 #endif
154 /* print general (card independent) information */
155 #define PRINT_G(level, fmt, args...) \
156 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
158 /* print card specific information */
159 #define PRINT(level, fmt, args...) \
160 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
162 /* Module Parameters */
163 static int phys_dma = 1;
164 module_param(phys_dma, int, 0444);
165 MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
167 static void dma_trm_tasklet(unsigned long data);
168 static void dma_trm_reset(struct dma_trm_ctx *d);
170 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
171 enum context_type type, int ctx, int num_desc,
172 int buf_size, int split_buf_size, int context_base);
173 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
174 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
176 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
177 enum context_type type, int ctx, int num_desc,
178 int context_base);
180 static void ohci1394_pci_remove(struct pci_dev *pdev);
182 #ifndef __LITTLE_ENDIAN
183 static const size_t hdr_sizes[] = {
184 3, /* TCODE_WRITEQ */
185 4, /* TCODE_WRITEB */
186 3, /* TCODE_WRITE_RESPONSE */
187 0, /* reserved */
188 3, /* TCODE_READQ */
189 4, /* TCODE_READB */
190 3, /* TCODE_READQ_RESPONSE */
191 4, /* TCODE_READB_RESPONSE */
192 1, /* TCODE_CYCLE_START */
193 4, /* TCODE_LOCK_REQUEST */
194 2, /* TCODE_ISO_DATA */
195 4, /* TCODE_LOCK_RESPONSE */
196 /* rest is reserved or link-internal */
199 static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
201 size_t size;
203 if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
204 return;
206 size = hdr_sizes[tcode];
207 while (size--)
208 data[size] = le32_to_cpu(data[size]);
210 #else
211 #define header_le32_to_cpu(w,x) do {} while (0)
212 #endif /* !LITTLE_ENDIAN */
214 /***********************************
215 * IEEE-1394 functionality section *
216 ***********************************/
218 static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
220 int i;
221 unsigned long flags;
222 quadlet_t r;
224 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
226 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
228 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
229 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
230 break;
232 mdelay(1);
235 r = reg_read(ohci, OHCI1394_PhyControl);
237 if (i >= OHCI_LOOP_COUNT)
238 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
239 r, r & 0x80000000, i);
241 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
243 return (r & 0x00ff0000) >> 16;
246 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
248 int i;
249 unsigned long flags;
250 u32 r = 0;
252 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
254 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
256 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
257 r = reg_read(ohci, OHCI1394_PhyControl);
258 if (!(r & 0x00004000))
259 break;
261 mdelay(1);
264 if (i == OHCI_LOOP_COUNT)
265 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
266 r, r & 0x00004000, i);
268 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
270 return;
273 /* Or's our value into the current value */
274 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
276 u8 old;
278 old = get_phy_reg (ohci, addr);
279 old |= data;
280 set_phy_reg (ohci, addr, old);
282 return;
285 static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
286 int phyid, int isroot)
288 quadlet_t *q = ohci->selfid_buf_cpu;
289 quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
290 size_t size;
291 quadlet_t q0, q1;
293 /* Check status of self-id reception */
295 if (ohci->selfid_swap)
296 q0 = le32_to_cpu(q[0]);
297 else
298 q0 = q[0];
300 if ((self_id_count & 0x80000000) ||
301 ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
302 PRINT(KERN_ERR,
303 "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
304 self_id_count, q0, ohci->self_id_errors);
306 /* Tip by James Goodwin <jamesg@Filanet.com>:
307 * We had an error, generate another bus reset in response. */
308 if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
309 set_phy_reg_mask (ohci, 1, 0x40);
310 ohci->self_id_errors++;
311 } else {
312 PRINT(KERN_ERR,
313 "Too many errors on SelfID error reception, giving up!");
315 return;
318 /* SelfID Ok, reset error counter. */
319 ohci->self_id_errors = 0;
321 size = ((self_id_count & 0x00001FFC) >> 2) - 1;
322 q++;
324 while (size > 0) {
325 if (ohci->selfid_swap) {
326 q0 = le32_to_cpu(q[0]);
327 q1 = le32_to_cpu(q[1]);
328 } else {
329 q0 = q[0];
330 q1 = q[1];
333 if (q0 == ~q1) {
334 DBGMSG ("SelfID packet 0x%x received", q0);
335 hpsb_selfid_received(host, cpu_to_be32(q0));
336 if (((q0 & 0x3f000000) >> 24) == phyid)
337 DBGMSG ("SelfID for this node is 0x%08x", q0);
338 } else {
339 PRINT(KERN_ERR,
340 "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
342 q += 2;
343 size -= 2;
346 DBGMSG("SelfID complete");
348 return;
351 static void ohci_soft_reset(struct ti_ohci *ohci) {
352 int i;
354 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
356 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
357 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
358 break;
359 mdelay(1);
361 DBGMSG ("Soft reset finished");
365 /* Generate the dma receive prgs and start the context */
366 static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
368 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
369 int i;
371 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
373 for (i=0; i<d->num_desc; i++) {
374 u32 c;
376 c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
377 if (generate_irq)
378 c |= DMA_CTL_IRQ;
380 d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
382 /* End of descriptor list? */
383 if (i + 1 < d->num_desc) {
384 d->prg_cpu[i]->branchAddress =
385 cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
386 } else {
387 d->prg_cpu[i]->branchAddress =
388 cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
391 d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
392 d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
395 d->buf_ind = 0;
396 d->buf_offset = 0;
398 if (d->type == DMA_CTX_ISO) {
399 /* Clear contextControl */
400 reg_write(ohci, d->ctrlClear, 0xffffffff);
402 /* Set bufferFill, isochHeader, multichannel for IR context */
403 reg_write(ohci, d->ctrlSet, 0xd0000000);
405 /* Set the context match register to match on all tags */
406 reg_write(ohci, d->ctxtMatch, 0xf0000000);
408 /* Clear the multi channel mask high and low registers */
409 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
410 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
412 /* Set up isoRecvIntMask to generate interrupts */
413 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
416 /* Tell the controller where the first AR program is */
417 reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
419 /* Run context */
420 reg_write(ohci, d->ctrlSet, 0x00008000);
422 DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
425 /* Initialize the dma transmit context */
426 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
428 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
430 /* Stop the context */
431 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
433 d->prg_ind = 0;
434 d->sent_ind = 0;
435 d->free_prgs = d->num_desc;
436 d->branchAddrPtr = NULL;
437 INIT_LIST_HEAD(&d->fifo_list);
438 INIT_LIST_HEAD(&d->pending_list);
440 if (d->type == DMA_CTX_ISO) {
441 /* enable interrupts */
442 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
445 DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
448 /* Count the number of available iso contexts */
449 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
451 int i,ctx=0;
452 u32 tmp;
454 reg_write(ohci, reg, 0xffffffff);
455 tmp = reg_read(ohci, reg);
457 DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
459 /* Count the number of contexts */
460 for (i=0; i<32; i++) {
461 if (tmp & 1) ctx++;
462 tmp >>= 1;
464 return ctx;
467 /* Global initialization */
468 static void ohci_initialize(struct ti_ohci *ohci)
470 quadlet_t buf;
471 int num_ports, i;
473 spin_lock_init(&ohci->phy_reg_lock);
475 /* Put some defaults to these undefined bus options */
476 buf = reg_read(ohci, OHCI1394_BusOptions);
477 buf |= 0x60000000; /* Enable CMC and ISC */
478 if (hpsb_disable_irm)
479 buf &= ~0x80000000;
480 else
481 buf |= 0x80000000; /* Enable IRMC */
482 buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
483 buf &= ~0x18000000; /* Disable PMC and BMC */
484 reg_write(ohci, OHCI1394_BusOptions, buf);
486 /* Set the bus number */
487 reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
489 /* Enable posted writes */
490 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);
492 /* Clear link control register */
493 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
495 /* Enable cycle timer and cycle master and set the IRM
496 * contender bit in our self ID packets if appropriate. */
497 reg_write(ohci, OHCI1394_LinkControlSet,
498 OHCI1394_LinkControl_CycleTimerEnable |
499 OHCI1394_LinkControl_CycleMaster);
500 i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
501 if (hpsb_disable_irm)
502 i &= ~PHY_04_CONTENDER;
503 else
504 i |= PHY_04_CONTENDER;
505 set_phy_reg(ohci, 4, i);
507 /* Set up self-id dma buffer */
508 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
510 /* enable self-id */
511 reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);
513 /* Set the Config ROM mapping register */
514 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
516 /* Now get our max packet size */
517 ohci->max_packet_size =
518 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
520 /* Clear the interrupt mask */
521 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
522 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
524 /* Clear the interrupt mask */
525 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
526 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
528 /* Initialize AR dma */
529 initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
530 initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
532 /* Initialize AT dma */
533 initialize_dma_trm_ctx(&ohci->at_req_context);
534 initialize_dma_trm_ctx(&ohci->at_resp_context);
536 /* Initialize IR Legacy DMA channel mask */
537 ohci->ir_legacy_channels = 0;
539 /* Accept AR requests from all nodes */
540 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
542 /* Set the address range of the physical response unit.
543 * Most controllers do not implement it as a writable register though.
544 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
545 * register content.
546 * To actually enable physical responses is the job of our interrupt
547 * handler which programs the physical request filter. */
548 reg_write(ohci, OHCI1394_PhyUpperBound,
549 OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);
551 DBGMSG("physUpperBoundOffset=%08x",
552 reg_read(ohci, OHCI1394_PhyUpperBound));
554 /* Specify AT retries */
555 reg_write(ohci, OHCI1394_ATRetries,
556 OHCI1394_MAX_AT_REQ_RETRIES |
557 (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
558 (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
560 /* We don't want hardware swapping */
561 reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
563 /* Enable interrupts */
564 reg_write(ohci, OHCI1394_IntMaskSet,
565 OHCI1394_unrecoverableError |
566 OHCI1394_masterIntEnable |
567 OHCI1394_busReset |
568 OHCI1394_selfIDComplete |
569 OHCI1394_RSPkt |
570 OHCI1394_RQPkt |
571 OHCI1394_respTxComplete |
572 OHCI1394_reqTxComplete |
573 OHCI1394_isochRx |
574 OHCI1394_isochTx |
575 OHCI1394_postedWriteErr |
576 OHCI1394_cycleTooLong |
577 OHCI1394_cycleInconsistent);
579 /* Enable link */
580 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
582 buf = reg_read(ohci, OHCI1394_Version);
583 PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
584 "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
585 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
586 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
587 (unsigned long long)pci_resource_start(ohci->dev, 0),
588 (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
589 ohci->max_packet_size,
590 ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);
592 /* Check all of our ports to make sure that if anything is
593 * connected, we enable that port. */
594 num_ports = get_phy_reg(ohci, 2) & 0xf;
595 for (i = 0; i < num_ports; i++) {
596 unsigned int status;
598 set_phy_reg(ohci, 7, i);
599 status = get_phy_reg(ohci, 8);
601 if (status & 0x20)
602 set_phy_reg(ohci, 8, status & ~1);
605 /* Serial EEPROM Sanity check. */
606 if ((ohci->max_packet_size < 512) ||
607 (ohci->max_packet_size > 4096)) {
608 /* Serial EEPROM contents are suspect, set a sane max packet
609 * size and print the raw contents for bug reports if verbose
610 * debug is enabled. */
611 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
612 int i;
613 #endif
615 PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
616 "attempting to set max_packet_size to 512 bytes");
617 reg_write(ohci, OHCI1394_BusOptions,
618 (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
619 ohci->max_packet_size = 512;
620 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
621 PRINT(KERN_DEBUG, " EEPROM Present: %d",
622 (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
623 reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);
625 for (i = 0;
626 ((i < 1000) &&
627 (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
628 udelay(10);
630 for (i = 0; i < 0x20; i++) {
631 reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
632 PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
633 (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
635 #endif
640 * Insert a packet in the DMA fifo and generate the DMA prg
641 * FIXME: rewrite the program in order to accept packets crossing
642 * page boundaries.
643 * check also that a single dma descriptor doesn't cross a
644 * page boundary.
646 static void insert_packet(struct ti_ohci *ohci,
647 struct dma_trm_ctx *d, struct hpsb_packet *packet)
649 u32 cycleTimer;
650 int idx = d->prg_ind;
652 DBGMSG("Inserting packet for node " NODE_BUS_FMT
653 ", tlabel=%d, tcode=0x%x, speed=%d",
654 NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
655 packet->tcode, packet->speed_code);
657 d->prg_cpu[idx]->begin.address = 0;
658 d->prg_cpu[idx]->begin.branchAddress = 0;
660 if (d->type == DMA_CTX_ASYNC_RESP) {
662 * For response packets, we need to put a timeout value in
663 * the 16 lower bits of the status... let's try 1 sec timeout
665 cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
666 d->prg_cpu[idx]->begin.status = cpu_to_le32(
667 (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
668 ((cycleTimer&0x01fff000)>>12));
670 DBGMSG("cycleTimer: %08x timeStamp: %08x",
671 cycleTimer, d->prg_cpu[idx]->begin.status);
672 } else
673 d->prg_cpu[idx]->begin.status = 0;
675 if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
677 if (packet->type == hpsb_raw) {
678 d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
679 d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
680 d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
681 } else {
682 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
683 (packet->header[0] & 0xFFFF);
685 if (packet->tcode == TCODE_ISO_DATA) {
686 /* Sending an async stream packet */
687 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
688 } else {
689 /* Sending a normal async request or response */
690 d->prg_cpu[idx]->data[1] =
691 (packet->header[1] & 0xFFFF) |
692 (packet->header[0] & 0xFFFF0000);
693 d->prg_cpu[idx]->data[2] = packet->header[2];
694 d->prg_cpu[idx]->data[3] = packet->header[3];
696 header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
699 if (packet->data_size) { /* block transmit */
700 if (packet->tcode == TCODE_STREAM_DATA){
701 d->prg_cpu[idx]->begin.control =
702 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
703 DMA_CTL_IMMEDIATE | 0x8);
704 } else {
705 d->prg_cpu[idx]->begin.control =
706 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
707 DMA_CTL_IMMEDIATE | 0x10);
709 d->prg_cpu[idx]->end.control =
710 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
711 DMA_CTL_IRQ |
712 DMA_CTL_BRANCH |
713 packet->data_size);
715 * Check that the packet data buffer
716 * does not cross a page boundary.
718 * XXX Fix this some day. eth1394 seems to trigger
719 * it, but ignoring it doesn't seem to cause a
720 * problem.
722 #if 0
723 if (cross_bound((unsigned long)packet->data,
724 packet->data_size)>0) {
725 /* FIXME: do something about it */
726 PRINT(KERN_ERR,
727 "%s: packet data addr: %p size %Zd bytes "
728 "cross page boundary", __FUNCTION__,
729 packet->data, packet->data_size);
731 #endif
732 d->prg_cpu[idx]->end.address = cpu_to_le32(
733 pci_map_single(ohci->dev, packet->data,
734 packet->data_size,
735 PCI_DMA_TODEVICE));
736 OHCI_DMA_ALLOC("single, block transmit packet");
738 d->prg_cpu[idx]->end.branchAddress = 0;
739 d->prg_cpu[idx]->end.status = 0;
740 if (d->branchAddrPtr)
741 *(d->branchAddrPtr) =
742 cpu_to_le32(d->prg_bus[idx] | 0x3);
743 d->branchAddrPtr =
744 &(d->prg_cpu[idx]->end.branchAddress);
745 } else { /* quadlet transmit */
746 if (packet->type == hpsb_raw)
747 d->prg_cpu[idx]->begin.control =
748 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
749 DMA_CTL_IMMEDIATE |
750 DMA_CTL_IRQ |
751 DMA_CTL_BRANCH |
752 (packet->header_size + 4));
753 else
754 d->prg_cpu[idx]->begin.control =
755 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
756 DMA_CTL_IMMEDIATE |
757 DMA_CTL_IRQ |
758 DMA_CTL_BRANCH |
759 packet->header_size);
761 if (d->branchAddrPtr)
762 *(d->branchAddrPtr) =
763 cpu_to_le32(d->prg_bus[idx] | 0x2);
764 d->branchAddrPtr =
765 &(d->prg_cpu[idx]->begin.branchAddress);
768 } else { /* iso packet */
769 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
770 (packet->header[0] & 0xFFFF);
771 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
772 header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
774 d->prg_cpu[idx]->begin.control =
775 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
776 DMA_CTL_IMMEDIATE | 0x8);
777 d->prg_cpu[idx]->end.control =
778 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
779 DMA_CTL_UPDATE |
780 DMA_CTL_IRQ |
781 DMA_CTL_BRANCH |
782 packet->data_size);
783 d->prg_cpu[idx]->end.address = cpu_to_le32(
784 pci_map_single(ohci->dev, packet->data,
785 packet->data_size, PCI_DMA_TODEVICE));
786 OHCI_DMA_ALLOC("single, iso transmit packet");
788 d->prg_cpu[idx]->end.branchAddress = 0;
789 d->prg_cpu[idx]->end.status = 0;
790 DBGMSG("Iso xmit context info: header[%08x %08x]\n"
791 " begin=%08x %08x %08x %08x\n"
792 " %08x %08x %08x %08x\n"
793 " end =%08x %08x %08x %08x",
794 d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
795 d->prg_cpu[idx]->begin.control,
796 d->prg_cpu[idx]->begin.address,
797 d->prg_cpu[idx]->begin.branchAddress,
798 d->prg_cpu[idx]->begin.status,
799 d->prg_cpu[idx]->data[0],
800 d->prg_cpu[idx]->data[1],
801 d->prg_cpu[idx]->data[2],
802 d->prg_cpu[idx]->data[3],
803 d->prg_cpu[idx]->end.control,
804 d->prg_cpu[idx]->end.address,
805 d->prg_cpu[idx]->end.branchAddress,
806 d->prg_cpu[idx]->end.status);
807 if (d->branchAddrPtr)
808 *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
809 d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
811 d->free_prgs--;
813 /* queue the packet in the appropriate context queue */
814 list_add_tail(&packet->driver_list, &d->fifo_list);
815 d->prg_ind = (d->prg_ind + 1) % d->num_desc;
819 * This function fills the FIFO with the (eventual) pending packets
820 * and runs or wakes up the DMA prg if necessary.
822 * The function MUST be called with the d->lock held.
824 static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
826 struct hpsb_packet *packet, *ptmp;
827 int idx = d->prg_ind;
828 int z = 0;
830 /* insert the packets into the dma fifo */
831 list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
832 if (!d->free_prgs)
833 break;
835 /* For the first packet only */
836 if (!z)
837 z = (packet->data_size) ? 3 : 2;
839 /* Insert the packet */
840 list_del_init(&packet->driver_list);
841 insert_packet(ohci, d, packet);
844 /* Nothing must have been done, either no free_prgs or no packets */
845 if (z == 0)
846 return;
848 /* Is the context running ? (should be unless it is
849 the first packet to be sent in this context) */
850 if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
851 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
853 DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
854 reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);
856 /* Check that the node id is valid, and not 63 */
857 if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
858 PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
859 else
860 reg_write(ohci, d->ctrlSet, 0x8000);
861 } else {
862 /* Wake up the dma context if necessary */
863 if (!(reg_read(ohci, d->ctrlSet) & 0x400))
864 DBGMSG("Waking transmit DMA ctx=%d",d->ctx);
866 /* do this always, to avoid race condition */
867 reg_write(ohci, d->ctrlSet, 0x1000);
870 return;
873 /* Transmission of an async or iso packet */
874 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
876 struct ti_ohci *ohci = host->hostdata;
877 struct dma_trm_ctx *d;
878 unsigned long flags;
880 if (packet->data_size > ohci->max_packet_size) {
881 PRINT(KERN_ERR,
882 "Transmit packet size %Zd is too big",
883 packet->data_size);
884 return -EOVERFLOW;
887 /* Decide whether we have an iso, a request, or a response packet */
888 if (packet->type == hpsb_raw)
889 d = &ohci->at_req_context;
890 else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
891 /* The legacy IT DMA context is initialized on first
892 * use. However, the alloc cannot be run from
893 * interrupt context, so we bail out if that is the
894 * case. I don't see anyone sending ISO packets from
895 * interrupt context anyway... */
897 if (ohci->it_legacy_context.ohci == NULL) {
898 if (in_interrupt()) {
899 PRINT(KERN_ERR,
900 "legacy IT context cannot be initialized during interrupt");
901 return -EINVAL;
904 if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
905 DMA_CTX_ISO, 0, IT_NUM_DESC,
906 OHCI1394_IsoXmitContextBase) < 0) {
907 PRINT(KERN_ERR,
908 "error initializing legacy IT context");
909 return -ENOMEM;
912 initialize_dma_trm_ctx(&ohci->it_legacy_context);
915 d = &ohci->it_legacy_context;
916 } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
917 d = &ohci->at_resp_context;
918 else
919 d = &ohci->at_req_context;
921 spin_lock_irqsave(&d->lock,flags);
923 list_add_tail(&packet->driver_list, &d->pending_list);
925 dma_trm_flush(ohci, d);
927 spin_unlock_irqrestore(&d->lock,flags);
929 return 0;
932 static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
934 struct ti_ohci *ohci = host->hostdata;
935 int retval = 0;
936 unsigned long flags;
937 int phy_reg;
939 switch (cmd) {
940 case RESET_BUS:
941 switch (arg) {
942 case SHORT_RESET:
943 phy_reg = get_phy_reg(ohci, 5);
944 phy_reg |= 0x40;
945 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
946 break;
947 case LONG_RESET:
948 phy_reg = get_phy_reg(ohci, 1);
949 phy_reg |= 0x40;
950 set_phy_reg(ohci, 1, phy_reg); /* set IBR */
951 break;
952 case SHORT_RESET_NO_FORCE_ROOT:
953 phy_reg = get_phy_reg(ohci, 1);
954 if (phy_reg & 0x80) {
955 phy_reg &= ~0x80;
956 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
959 phy_reg = get_phy_reg(ohci, 5);
960 phy_reg |= 0x40;
961 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
962 break;
963 case LONG_RESET_NO_FORCE_ROOT:
964 phy_reg = get_phy_reg(ohci, 1);
965 phy_reg &= ~0x80;
966 phy_reg |= 0x40;
967 set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
968 break;
969 case SHORT_RESET_FORCE_ROOT:
970 phy_reg = get_phy_reg(ohci, 1);
971 if (!(phy_reg & 0x80)) {
972 phy_reg |= 0x80;
973 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
976 phy_reg = get_phy_reg(ohci, 5);
977 phy_reg |= 0x40;
978 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
979 break;
980 case LONG_RESET_FORCE_ROOT:
981 phy_reg = get_phy_reg(ohci, 1);
982 phy_reg |= 0xc0;
983 set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
984 break;
985 default:
986 retval = -1;
988 break;
990 case GET_CYCLE_COUNTER:
991 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
992 break;
994 case SET_CYCLE_COUNTER:
995 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
996 break;
998 case SET_BUS_ID:
999 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
1000 break;
1002 case ACT_CYCLE_MASTER:
1003 if (arg) {
1004 /* check if we are root and other nodes are present */
1005 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
1006 if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1008 * enable cycleTimer, cycleMaster
1010 DBGMSG("Cycle master enabled");
1011 reg_write(ohci, OHCI1394_LinkControlSet,
1012 OHCI1394_LinkControl_CycleTimerEnable |
1013 OHCI1394_LinkControl_CycleMaster);
1015 } else {
1016 /* disable cycleTimer, cycleMaster, cycleSource */
1017 reg_write(ohci, OHCI1394_LinkControlClear,
1018 OHCI1394_LinkControl_CycleTimerEnable |
1019 OHCI1394_LinkControl_CycleMaster |
1020 OHCI1394_LinkControl_CycleSource);
1022 break;
1024 case CANCEL_REQUESTS:
1025 DBGMSG("Cancel request received");
1026 dma_trm_reset(&ohci->at_req_context);
1027 dma_trm_reset(&ohci->at_resp_context);
1028 break;
1030 case ISO_LISTEN_CHANNEL:
1032 u64 mask;
1033 struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
1034 int ir_legacy_active;
1036 if (arg<0 || arg>63) {
1037 PRINT(KERN_ERR,
1038 "%s: IS0 listen channel %d is out of range",
1039 __FUNCTION__, arg);
1040 return -EFAULT;
1043 mask = (u64)0x1<<arg;
1045 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1047 if (ohci->ISO_channel_usage & mask) {
1048 PRINT(KERN_ERR,
1049 "%s: IS0 listen channel %d is already used",
1050 __FUNCTION__, arg);
1051 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1052 return -EFAULT;
1055 ir_legacy_active = ohci->ir_legacy_channels;
1057 ohci->ISO_channel_usage |= mask;
1058 ohci->ir_legacy_channels |= mask;
1060 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1062 if (!ir_legacy_active) {
1063 if (ohci1394_register_iso_tasklet(ohci,
1064 &ohci->ir_legacy_tasklet) < 0) {
1065 PRINT(KERN_ERR, "No IR DMA context available");
1066 return -EBUSY;
1069 /* the IR context can be assigned to any DMA context
1070 * by ohci1394_register_iso_tasklet */
1071 d->ctx = ohci->ir_legacy_tasklet.context;
1072 d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
1073 32*d->ctx;
1074 d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
1075 32*d->ctx;
1076 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
1077 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
1079 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1081 if (printk_ratelimit())
1082 DBGMSG("IR legacy activated");
1085 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1087 if (arg>31)
1088 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1089 1<<(arg-32));
1090 else
1091 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1092 1<<arg);
1094 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1095 DBGMSG("Listening enabled on channel %d", arg);
1096 break;
1098 case ISO_UNLISTEN_CHANNEL:
1100 u64 mask;
1102 if (arg<0 || arg>63) {
1103 PRINT(KERN_ERR,
1104 "%s: IS0 unlisten channel %d is out of range",
1105 __FUNCTION__, arg);
1106 return -EFAULT;
1109 mask = (u64)0x1<<arg;
1111 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1113 if (!(ohci->ISO_channel_usage & mask)) {
1114 PRINT(KERN_ERR,
1115 "%s: IS0 unlisten channel %d is not used",
1116 __FUNCTION__, arg);
1117 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1118 return -EFAULT;
1121 ohci->ISO_channel_usage &= ~mask;
1122 ohci->ir_legacy_channels &= ~mask;
1124 if (arg>31)
1125 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1126 1<<(arg-32));
1127 else
1128 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1129 1<<arg);
1131 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1132 DBGMSG("Listening disabled on channel %d", arg);
1134 if (ohci->ir_legacy_channels == 0) {
1135 stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1136 DBGMSG("ISO legacy receive context stopped");
1139 break;
1141 default:
1142 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1143 cmd);
1144 break;
1146 return retval;
1149 /***********************************
1150 * rawiso ISO reception *
1151 ***********************************/
1154 We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1155 buffer is split into "blocks" (regions described by one DMA
1156 descriptor). Each block must be one page or less in size, and
1157 must not cross a page boundary.
1159 There is one little wrinkle with buffer-fill mode: a packet that
1160 starts in the final block may wrap around into the first block. But
1161 the user API expects all packets to be contiguous. Our solution is
1162 to keep the very last page of the DMA buffer in reserve - if a
1163 packet spans the gap, we copy its tail into this page.
/* Driver-private state for one rawiso isochronous-receive context.
 * Allocated in ohci_iso_recv_init() and stored in iso->hostdata. */
struct ohci_iso_recv {
	struct ti_ohci *ohci;		/* owning host controller */

	struct ohci1394_iso_tasklet task;	/* bottom half servicing this context */
	int task_active;		/* nonzero once the tasklet is registered */

	/* hardware reception mode: buffer-fill packs packets back to back,
	   packet-per-buffer gives each packet its own stride-sized slot */
	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers (byte offsets for this context) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1208 static void ohci_iso_recv_task(unsigned long data);
1209 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1210 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1211 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1212 static void ohci_iso_recv_program(struct hpsb_iso *iso);
/* Allocate and set up the receive context for a rawiso session:
 * choose the DMA mode, size the block ring, allocate the DMA program,
 * register the tasklet and write the descriptor chain.
 * Returns 0 on success, -ENOMEM/-EBUSY on failure (all partial state is
 * torn down via ohci_iso_recv_shutdown on the error path). */
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
	if (!recv)
		return -ENOMEM;

	/* stash hostdata first so ohci_iso_recv_shutdown() can find it
	   from the err: path below */
	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page
		 * (NOTE(review): assumes iso->buf_size >= PAGE_SIZE, otherwise the
		 * unsigned subtraction wraps — presumably validated by the caller) */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
							((recv->nblocks+1)/iso->buf_packets);
		/* clamp so we interrupt at least four times per trip around the ring */
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		/* smallest power of two >= max_packet_size, starting at 8 */
		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		   recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				 sizeof(struct dma_cmd) * recv->nblocks,
				 recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
				                       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
1345 static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1347 struct ohci_iso_recv *recv = iso->hostdata;
1349 /* disable interrupts */
1350 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1352 /* halt DMA */
1353 ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1356 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1358 struct ohci_iso_recv *recv = iso->hostdata;
1360 if (recv->task_active) {
1361 ohci_iso_recv_stop(iso);
1362 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1363 recv->task_active = 0;
1366 dma_prog_region_free(&recv->prog);
1367 kfree(recv);
1368 iso->hostdata = NULL;
1371 /* set up a "gapped" ring buffer DMA program */
1372 static void ohci_iso_recv_program(struct hpsb_iso *iso)
1374 struct ohci_iso_recv *recv = iso->hostdata;
1375 int blk;
1377 /* address of 'branch' field in previous DMA descriptor */
1378 u32 *prev_branch = NULL;
1380 for (blk = 0; blk < recv->nblocks; blk++) {
1381 u32 control;
1383 /* the DMA descriptor */
1384 struct dma_cmd *cmd = &recv->block[blk];
1386 /* offset of the DMA descriptor relative to the DMA prog buffer */
1387 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1389 /* offset of this packet's data within the DMA buffer */
1390 unsigned long buf_offset = blk * recv->buf_stride;
1392 if (recv->dma_mode == BUFFER_FILL_MODE) {
1393 control = 2 << 28; /* INPUT_MORE */
1394 } else {
1395 control = 3 << 28; /* INPUT_LAST */
1398 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1400 /* interrupt on last block, and at intervals */
1401 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1402 control |= 3 << 20; /* want interrupt */
1405 control |= 3 << 18; /* enable branch to address */
1406 control |= recv->buf_stride;
1408 cmd->control = cpu_to_le32(control);
1409 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1410 cmd->branchAddress = 0; /* filled in on next loop */
1411 cmd->status = cpu_to_le32(recv->buf_stride);
1413 /* link the previous descriptor to this one */
1414 if (prev_branch) {
1415 *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1418 prev_branch = &cmd->branchAddress;
1421 /* the final descriptor's branch address and Z should be left at 0 */
1424 /* listen or unlisten to a specific channel (multi-channel mode only) */
1425 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1427 struct ohci_iso_recv *recv = iso->hostdata;
1428 int reg, i;
1430 if (channel < 32) {
1431 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1432 i = channel;
1433 } else {
1434 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1435 i = channel - 32;
1438 reg_write(recv->ohci, reg, (1 << i));
1440 /* issue a dummy read to force all PCI writes to be posted immediately */
1441 mb();
1442 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1445 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1447 struct ohci_iso_recv *recv = iso->hostdata;
1448 int i;
1450 for (i = 0; i < 64; i++) {
1451 if (mask & (1ULL << i)) {
1452 if (i < 32)
1453 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1454 else
1455 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1456 } else {
1457 if (i < 32)
1458 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1459 else
1460 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1464 /* issue a dummy read to force all PCI writes to be posted immediately */
1465 mb();
1466 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
/* Program and start the IR DMA context: set the receive mode, the
 * tag/channel/cycle/sync match conditions, point CommandPtr at the first
 * descriptor, then set the RUN bit and verify the context actually started.
 * cycle/sync of -1 mean "don't match on that field".
 * Returns 0 on success, -1 if the context refused to run. */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	/* start from a clean ContextControl */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1560 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1562 /* re-use the DMA descriptor for the block */
1563 /* by linking the previous descriptor to it */
1565 int next_i = block;
1566 int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1568 struct dma_cmd *next = &recv->block[next_i];
1569 struct dma_cmd *prev = &recv->block[prev_i];
1571 /* ignore out-of-range requests */
1572 if ((block < 0) || (block > recv->nblocks))
1573 return;
1575 /* 'next' becomes the new end of the DMA chain,
1576 so disable branch and enable interrupt */
1577 next->branchAddress = 0;
1578 next->control |= cpu_to_le32(3 << 20);
1579 next->status = cpu_to_le32(recv->buf_stride);
1581 /* link prev to next */
1582 prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1583 sizeof(struct dma_cmd) * next_i)
1584 | 1); /* Z=1 */
1586 /* disable interrupt on previous DMA descriptor, except at intervals */
1587 if ((prev_i % recv->block_irq_interval) == 0) {
1588 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1589 } else {
1590 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1592 wmb();
1594 /* wake up DMA in case it fell asleep */
1595 reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1598 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1599 struct hpsb_iso_packet_info *info)
1601 /* release the memory where the packet was */
1602 recv->released_bytes += info->total_len;
1604 /* have we released enough memory for one block? */
1605 while (recv->released_bytes > recv->buf_stride) {
1606 ohci_iso_recv_release_block(recv, recv->block_reader);
1607 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1608 recv->released_bytes -= recv->buf_stride;
1612 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1614 struct ohci_iso_recv *recv = iso->hostdata;
1615 if (recv->dma_mode == BUFFER_FILL_MODE) {
1616 ohci_iso_recv_bufferfill_release(recv, info);
1617 } else {
1618 ohci_iso_recv_release_block(recv, info - iso->infos);
/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		/* NOTE(review): a bogus length is only logged here; parsing
		 * continues with the oversized value — confirm this is the
		 * intended best-effort behavior */
		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			/* only copy if the tail actually fits in the single
			   reserved guard page */
			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* hand the packet up to the ieee1394 core */
		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
1730 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1732 int loop;
1733 struct ti_ohci *ohci = recv->ohci;
1735 /* loop over all blocks */
1736 for (loop = 0; loop < recv->nblocks; loop++) {
1738 /* check block_dma to see if it's done */
1739 struct dma_cmd *im = &recv->block[recv->block_dma];
1741 /* check the DMA descriptor for new writes to xferStatus */
1742 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1744 /* rescount is the number of bytes *remaining to be written* in the block */
1745 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1747 unsigned char event = xferstatus & 0x1F;
1749 if (!event) {
1750 /* nothing has happened to this block yet */
1751 break;
1754 if (event != 0x11) {
1755 atomic_inc(&iso->overflows);
1756 PRINT(KERN_ERR,
1757 "IR DMA error - OHCI error code 0x%02x\n", event);
1760 if (rescount != 0) {
1761 /* the card is still writing to this block;
1762 we can't touch it until it's done */
1763 break;
1766 /* OK, the block is finished... */
1768 /* sync our view of the block */
1769 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1771 /* reset the DMA descriptor */
1772 im->status = recv->buf_stride;
1774 /* advance block_dma */
1775 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1777 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1778 atomic_inc(&iso->overflows);
1779 DBGMSG("ISO reception overflow - "
1780 "ran out of DMA blocks");
1784 /* parse any packets that have arrived */
1785 ohci_iso_recv_bufferfill_parse(iso, recv);
1788 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1790 int count;
1791 int wake = 0;
1792 struct ti_ohci *ohci = recv->ohci;
1794 /* loop over the entire buffer */
1795 for (count = 0; count < recv->nblocks; count++) {
1796 u32 packet_len = 0;
1798 /* pointer to the DMA descriptor */
1799 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1801 /* check the DMA descriptor for new writes to xferStatus */
1802 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1803 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1805 unsigned char event = xferstatus & 0x1F;
1807 if (!event) {
1808 /* this packet hasn't come in yet; we are done for now */
1809 goto out;
1812 if (event == 0x11) {
1813 /* packet received successfully! */
1815 /* rescount is the number of bytes *remaining* in the packet buffer,
1816 after the packet was written */
1817 packet_len = recv->buf_stride - rescount;
1819 } else if (event == 0x02) {
1820 PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1821 } else if (event) {
1822 PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1825 /* sync our view of the buffer */
1826 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1828 /* record the per-packet info */
1830 /* iso header is 8 bytes ahead of the data payload */
1831 unsigned char *hdr;
1833 unsigned int offset;
1834 unsigned short cycle;
1835 unsigned char channel, tag, sy;
1837 offset = iso->pkt_dma * recv->buf_stride;
1838 hdr = iso->data_buf.kvirt + offset;
1840 /* skip iso header */
1841 offset += 8;
1842 packet_len -= 8;
1844 cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1845 channel = hdr[5] & 0x3F;
1846 tag = hdr[5] >> 6;
1847 sy = hdr[4] & 0xF;
1849 hpsb_iso_packet_received(iso, offset, packet_len,
1850 recv->buf_stride, cycle, channel, tag, sy);
1853 /* reset the DMA descriptor */
1854 il->status = recv->buf_stride;
1856 wake = 1;
1857 recv->block_dma = iso->pkt_dma;
1860 out:
1861 if (wake)
1862 hpsb_iso_wake(iso);
1865 static void ohci_iso_recv_task(unsigned long data)
1867 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1868 struct ohci_iso_recv *recv = iso->hostdata;
1870 if (recv->dma_mode == BUFFER_FILL_MODE)
1871 ohci_iso_recv_bufferfill_task(iso, recv);
1872 else
1873 ohci_iso_recv_packetperbuf_task(iso, recv);
1876 /***********************************
1877 * rawiso ISO transmission *
1878 ***********************************/
/* Driver-private state for one rawiso isochronous-transmit context.
 * Allocated in ohci_iso_xmit_init() and stored in iso->hostdata. */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;		/* owning host controller */
	struct dma_prog_region prog;	/* DMA program (iso_xmit_cmd array) */
	struct ohci1394_iso_tasklet task;	/* bottom half servicing this context */
	int task_active;		/* nonzero once the tasklet is registered */

	/* OHCI IT DMA context control registers (byte offsets for this context) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;	/* carries the embedded header below */
	u8 iso_hdr[8];		/* isochronous packet header, filled per packet */
	u32 unused[2];		/* pad so output_last starts on the next descriptor slot */
	struct dma_cmd output_last;	/* points at the payload in the data buffer */
};
1902 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1903 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1904 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1905 static void ohci_iso_xmit_task(unsigned long data);
1907 static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1909 struct ohci_iso_xmit *xmit;
1910 unsigned int prog_size;
1911 int ctx;
1912 int ret = -ENOMEM;
1914 xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
1915 if (!xmit)
1916 return -ENOMEM;
1918 iso->hostdata = xmit;
1919 xmit->ohci = iso->host->hostdata;
1920 xmit->task_active = 0;
1922 dma_prog_region_init(&xmit->prog);
1924 prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1926 if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1927 goto err;
1929 ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1930 ohci_iso_xmit_task, (unsigned long) iso);
1932 if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
1933 ret = -EBUSY;
1934 goto err;
1937 xmit->task_active = 1;
1939 /* xmit context registers are spaced 16 bytes apart */
1940 ctx = xmit->task.context;
1941 xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1942 xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1943 xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1945 return 0;
1947 err:
1948 ohci_iso_xmit_shutdown(iso);
1949 return ret;
1952 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1954 struct ohci_iso_xmit *xmit = iso->hostdata;
1955 struct ti_ohci *ohci = xmit->ohci;
1957 /* disable interrupts */
1958 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1960 /* halt DMA */
1961 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1962 /* XXX the DMA context will lock up if you try to send too much data! */
1963 PRINT(KERN_ERR,
1964 "you probably exceeded the OHCI card's bandwidth limit - "
1965 "reload the module and reduce xmit bandwidth");
1969 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1971 struct ohci_iso_xmit *xmit = iso->hostdata;
1973 if (xmit->task_active) {
1974 ohci_iso_xmit_stop(iso);
1975 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1976 xmit->task_active = 0;
1979 dma_prog_region_free(&xmit->prog);
1980 kfree(xmit);
1981 iso->hostdata = NULL;
/* Transmit bottom half: retire every packet the controller has finished
 * sending, starting at pkt_dma, and wake up any writer waiting for space.
 * (NOTE(review): the loop relies on hpsb_iso_packet_sent() advancing
 * iso->pkt_dma; confirm against the ieee1394 core.) */
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8 event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle (timestamp written back into the status field) */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
/* Append one packet (described by 'info') to the transmit DMA chain at
 * slot iso->first_packet: build its OUTPUT_MORE_IMMEDIATE header plus
 * OUTPUT_LAST payload descriptor, link the previous descriptor to it, and
 * wake the context. Returns 0, or -EINVAL if the payload would cross a
 * page boundary. */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now (new end of chain) */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
/* Start the IT DMA context at the descriptor for iso->pkt_dma, optionally
 * waiting for a specific start cycle (cycle == -1 means start immediately).
 * Returns 0 on success, -1 if the RUN bit did not stick. */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
/* Single dispatch point for all isochronous control requests from the
 * ieee1394 core.  'arg' is command-specific: a cycle number, a pointer to
 * a packet-info struct, a pointer to an int triple (RECV_START), a channel
 * number, or a pointer to a u64 channel mask.  Returns 0 or the callee's
 * status; -EINVAL for unknown commands. */
static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
{
	switch(cmd) {
	case XMIT_INIT:
		return ohci_iso_xmit_init(iso);
	case XMIT_START:
		return ohci_iso_xmit_start(iso, arg);
	case XMIT_STOP:
		ohci_iso_xmit_stop(iso);
		return 0;
	case XMIT_QUEUE:
		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
	case XMIT_SHUTDOWN:
		ohci_iso_xmit_shutdown(iso);
		return 0;

	case RECV_INIT:
		return ohci_iso_recv_init(iso);
	case RECV_START: {
		/* NOTE(review): args appears to be {cycle, tag_mask, sync} --
		 * confirm against ohci_iso_recv_start()'s parameter list. */
		int *args = (int*) arg;
		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
	}
	case RECV_STOP:
		ohci_iso_recv_stop(iso);
		return 0;
	case RECV_RELEASE:
		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
		return 0;
	case RECV_FLUSH:
		/* run the receive bottom half synchronously to drain buffers */
		ohci_iso_recv_task((unsigned long) iso);
		return 0;
	case RECV_SHUTDOWN:
		ohci_iso_recv_shutdown(iso);
		return 0;
	case RECV_LISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 1);
		return 0;
	case RECV_UNLISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 0);
		return 0;
	case RECV_SET_CHANNEL_MASK:
		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
		return 0;

	default:
		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return -EINVAL;
}
2227 /***************************************
2228 * IEEE-1394 functionality section END *
2229 ***************************************/
2232 /********************************************************
2233 * Global stuff (interrupt handler, init/shutdown code) *
2234 ********************************************************/
/* Reset an async-transmit DMA context: stop the hardware context, detach
 * every packet that was queued to it (both in-flight and pending), reset
 * the descriptor bookkeeping, then complete each detached packet toward
 * the ieee1394 core with ACKX_ABORTED.  The completion callbacks run
 * after the context lock has been dropped, since they may re-enter the
 * driver. */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;	/* nothing outstanding any more */
	d->free_prgs = d->num_desc;	/* all program slots reusable */

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
2275 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2276 quadlet_t rx_event,
2277 quadlet_t tx_event)
2279 struct ohci1394_iso_tasklet *t;
2280 unsigned long mask;
2281 unsigned long flags;
2283 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2285 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2286 mask = 1 << t->context;
2288 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2289 tasklet_schedule(&t->tasklet);
2290 else if (rx_event & mask)
2291 tasklet_schedule(&t->tasklet);
2294 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
/* Top-half interrupt handler.  Reads and acks IntEvent (except busReset,
 * which is only cleared once selfIDComplete has been handled), then
 * demultiplexes each event bit in turn, clearing the bit from the local
 * 'event' copy as it is handled.  Any bit still set at the end indicates
 * an interrupt we enabled but never wrote a handler for. */
static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
{
	quadlet_t event, node_id;
	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
	struct hpsb_host *host = ohci->host;
	int phyid = -1, isroot = 0;
	unsigned long flags;

	/* Read and clear the interrupt event register.  Don't clear
	 * the busReset event, though. This is done when we get the
	 * selfIDComplete interrupt. */
	spin_lock_irqsave(&ohci->event_lock, flags);
	event = reg_read(ohci, OHCI1394_IntEventClear);
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	spin_unlock_irqrestore(&ohci->event_lock, flags);

	if (!event)
		return IRQ_NONE;

	/* If event is ~(u32)0 cardbus card was ejected.  In this case
	 * we just return, and clean up in the ohci1394_pci_remove
	 * function. */
	if (event == ~(u32) 0) {
		DBGMSG("Device removed.");
		return IRQ_NONE;
	}

	DBGMSG("IntEvent: %08x", event);

	if (event & OHCI1394_unrecoverableError) {
		int ctx;
		PRINT(KERN_ERR, "Unrecoverable error!");

		/* Dump control/cmdptr of every context whose dead bit
		 * (0x800) is set, for all four async contexts ... */
		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));

		/* ... and for every iso transmit/receive context (register
		 * strides: 16 bytes per IT context, 32 per IR context). */
		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Xmit %d Context died: "
					"ctrl[%08x] cmdptr[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
		}

		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Recv %d Context died: "
					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
		}

		event &= ~OHCI1394_unrecoverableError;
	}
	if (event & OHCI1394_postedWriteErr) {
		PRINT(KERN_ERR, "physical posted write error");
		/* no recovery strategy yet, had to involve protocol drivers */
		event &= ~OHCI1394_postedWriteErr;
	}
	if (event & OHCI1394_cycleTooLong) {
		if (printk_ratelimit())
			PRINT(KERN_WARNING, "isochronous cycle too long");
		else
			DBGMSG("OHCI1394_cycleTooLong");
		/* cycle master is disabled by the controller on this error;
		 * re-enable it */
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_CycleMaster);
		event &= ~OHCI1394_cycleTooLong;
	}
	if (event & OHCI1394_cycleInconsistent) {
		/* We subscribe to the cycleInconsistent event only to
		 * clear the corresponding event bit... otherwise,
		 * isochronous cycleMatch DMA won't work. */
		DBGMSG("OHCI1394_cycleInconsistent");
		event &= ~OHCI1394_cycleInconsistent;
	}
	if (event & OHCI1394_busReset) {
		/* The busReset event bit can't be cleared during the
		 * selfID phase, so we disable busReset interrupts, to
		 * avoid burying the cpu in interrupt requests. */
		spin_lock_irqsave(&ohci->event_lock, flags);
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

		if (ohci->check_busreset) {
			int loop_count = 0;

			udelay(10);

			/* spin until the controller stops re-asserting
			 * busReset, briefly dropping the lock each pass */
			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

				spin_unlock_irqrestore(&ohci->event_lock, flags);
				udelay(10);
				spin_lock_irqsave(&ohci->event_lock, flags);

				/* The loop counter check is to prevent the driver
				 * from remaining in this state forever. For the
				 * initial bus reset, the loop continues for ever
				 * and the system hangs, until some device is plugged-in
				 * or out manually into a port! The forced reset seems
				 * to solve this problem. This mainly effects nForce2. */
				if (loop_count > 10000) {
					ohci_devctl(host, RESET_BUS, LONG_RESET);
					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
					loop_count = 0;
				}

				loop_count++;
			}
		}
		spin_unlock_irqrestore(&ohci->event_lock, flags);
		if (!host->in_bus_reset) {
			DBGMSG("irq_handler: Bus reset requested");

			/* Subsystem call */
			hpsb_bus_reset(ohci->host);
		}
		event &= ~OHCI1394_busReset;
	}
	if (event & OHCI1394_reqTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_req_context;
		DBGMSG("Got reqTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "reqTxComplete");
		else
			/* run the bottom half directly in irq context */
			dma_trm_tasklet((unsigned long)d);
			//tasklet_schedule(&d->task);
		event &= ~OHCI1394_reqTxComplete;
	}
	if (event & OHCI1394_respTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_resp_context;
		DBGMSG("Got respTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "respTxComplete");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_respTxComplete;
	}
	if (event & OHCI1394_RQPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_req_context;
		DBGMSG("Got RQPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RQPkt;
	}
	if (event & OHCI1394_RSPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
		DBGMSG("Got RSPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RSPkt;
	}
	if (event & OHCI1394_isochRx) {
		quadlet_t rx_event;

		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
		event &= ~OHCI1394_isochRx;
	}
	if (event & OHCI1394_isochTx) {
		quadlet_t tx_event;

		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
		event &= ~OHCI1394_isochTx;
	}
	if (event & OHCI1394_selfIDComplete) {
		if (host->in_bus_reset) {
			node_id = reg_read(ohci, OHCI1394_NodeID);

			/* bit 31 = idValid */
			if (!(node_id & 0x80000000)) {
				PRINT(KERN_ERR,
				      "SelfID received, but NodeID invalid "
				      "(probably new bus reset occurred): %08X",
				      node_id);
				goto selfid_not_valid;
			}

			phyid =  node_id & 0x0000003f;
			isroot = (node_id & 0x40000000) != 0;

			DBGMSG("SelfID interrupt received "
			       "(phyid %d, %s)", phyid,
			       (isroot ? "root" : "not root"));

			handle_selfid(ohci, host, phyid, isroot);

			/* Clear the bus reset event and re-enable the
			 * busReset interrupt. */
			spin_lock_irqsave(&ohci->event_lock, flags);
			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
			spin_unlock_irqrestore(&ohci->event_lock, flags);

			/* Turn on phys dma reception.
			 *
			 * TODO: Enable some sort of filtering management.
			 */
			if (phys_dma) {
				reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
					  0xffffffff);
				reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
					  0xffffffff);
			}

			DBGMSG("PhyReqFilter=%08x%08x",
			       reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
			       reg_read(ohci, OHCI1394_PhyReqFilterLoSet));

			hpsb_selfid_complete(host, phyid, isroot);
		} else
			PRINT(KERN_ERR,
			      "SelfID received outside of bus reset sequence");

selfid_not_valid:
		event &= ~OHCI1394_selfIDComplete;
	}

	/* Make sure we handle everything, just in case we accidentally
	 * enabled an interrupt that we didn't write a handler for. */
	if (event)
		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
		      event);

	return IRQ_HANDLED;
}
2557 /* Put the buffer back into the dma context */
2558 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2560 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2561 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2563 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2564 d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2565 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2566 d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2568 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2569 * context program descriptors before it sees the wakeup bit set. */
2570 wmb();
2572 /* wake up the dma context if necessary */
2573 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2574 PRINT(KERN_INFO,
2575 "Waking dma ctx=%d ... processing is probably too slow",
2576 d->ctx);
2579 /* do this always, to avoid race condition */
2580 reg_write(ohci, d->ctrlSet, 0x1000);
/* Conditionally byte-swap a quadlet read from a receive DMA buffer.
 * 'noswap' is set for controllers that deliver data already in host
 * order (see ohci->no_swap_incoming).  Fix: parenthesize both macro
 * arguments and the whole expansion so an expression argument (or an
 * assignment passed as 'noswap') cannot re-associate with surrounding
 * operators.  Note 'data' appears twice textually but only one branch
 * of ?: is ever evaluated. */
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu(data))
/* Fixed byte length contributed by each transaction code, indexed by
 * tcode.  As used by packet_length(): 0 means "block payload follows,
 * real length is taken from the data_length field in the header";
 * a negative entry marks a tcode that is invalid here and makes
 * packet_length() fail (< 4). */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
				   -1, 0, -1, 0, -1, -1, 16, -1};
/*
 * Determine the length of a packet in the buffer
 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
 *
 * For async contexts the base length comes from TCODE_SIZE; block-payload
 * tcodes (entry 0) read data_length from header quadlet 3, which may sit
 * in the *next* ring buffer when the header straddles a buffer boundary.
 * Returns a value < 4 for invalid tcodes; result is padded to a quadlet
 * multiple.
 */
static inline int packet_length(struct dma_rcv_ctx *d, int idx,
				quadlet_t *buf_ptr, int offset,
				unsigned char tcode, int noswap)
{
	int length = -1;

	if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
		length = TCODE_SIZE[tcode];
		if (length == 0) {
			if (offset + 12 >= d->buf_size) {
				/* header quadlet 3 wrapped into the next buffer:
				 * index it relative to how many quadlets of the
				 * header were left in this buffer */
				length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
						[3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
			} else {
				length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
			}
			/* add the fixed header+trailer size for block packets */
			length += 20;
		}
	} else if (d->type == DMA_CTX_ISO) {
		/* Assumption: buffer fill mode with header/trailer */
		length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
	}

	/* round up to a whole number of quadlets */
	if (length > 0 && length % 4)
		length += 4 - (length % 4);

	return length;
}
/* Tasklet that processes dma receive buffers.
 *
 * Walks the receive ring starting at (d->buf_ind, d->buf_offset),
 * extracting one packet per iteration into the split buffer d->spb and
 * handing it to hpsb_packet_received().  Consumed ring buffers are
 * recycled via insert_dma_buffer().  The amount of valid data in the
 * current buffer is derived from the descriptor's residual count
 * ("rescount", low 16 bits of status).  Packets that straddle buffer
 * boundaries are reassembled piecewise into d->spb. */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	/* residual count = bytes the controller has NOT yet filled */
	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor.  */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call.  */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* copy the tail of the current buffer, then as many
			 * whole buffers as needed, then the head of the last */
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset=0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr,buf_ptr,d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset==d->buf_size) {
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset=0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it.  */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				header_le32_to_cpu(d->spb, tcode);
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(d->spb[0]>>10)&0x3f);

			/* ack code lives in the trailer quadlet; 0x11 = ack_complete */
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
				== 0x11) ? 1 : 0;

			/* length-4: strip the status/trailer quadlet */
			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

		bytes_left = d->buf_size - rescount - offset;

	}

	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
/* Bottom half that processes sent packets.
 *
 * Walks the context's fifo_list in order, stopping at the first packet
 * whose descriptor status is still 0 (not yet completed by the
 * controller).  For each completed packet it decodes the xferStatus into
 * a 1394 ack (or an ACKX_* pseudo-ack for OHCI evt_* errors), completes
 * the packet toward the core, unmaps its payload, and advances the
 * descriptor bookkeeping.  Finally re-arms the context with any packets
 * that were waiting for free descriptors (dma_trm_flush). */
static void dma_trm_tasklet (unsigned long data)
{
	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	struct hpsb_packet *packet, *ptmp;
	unsigned long flags;
	u32 status, ack;
	size_t datasize;

	spin_lock_irqsave(&d->lock, flags);

	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
		datasize = packet->data_size;
		/* payload packets use a two-part program: status is in the
		 * final (end) descriptor; header-only/raw use the first */
		if (datasize && packet->type != hpsb_raw)
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->begin.status) >> 16;

		if (status == 0)
			/* this packet hasn't been sent yet*/
			break;

#ifdef OHCI1394_DEBUG
		if (datasize)
			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
				       d->ctx);
			else
				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
				       d->ctx);
		else
			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
			       "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
				>>16)&0x3f,
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
				>>4)&0xf,
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
				>>10)&0x3f,
			       status&0x1f, (status>>5)&0x3,
			       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
			       d->ctx);
#endif

		/* bit 4 set = a real 1394 ack code in the low nibble;
		 * otherwise the low 5 bits are an OHCI evt_* error code */
		if (status & 0x10) {
			ack = status & 0xf;
		} else {
			switch (status & 0x1f) {
			case EVT_NO_STATUS: /* that should never happen */
			case EVT_RESERVED_A: /* that should never happen */
			case EVT_LONG_PACKET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_MISSING_ACK:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_UNDERRUN:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_OVERRUN: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_DESCRIPTOR_READ:
			case EVT_DATA_READ:
			case EVT_DATA_WRITE:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_BUS_RESET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_TIMEOUT:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_TCODE_ERR:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_RESERVED_B: /* that should never happen */
			case EVT_RESERVED_C: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_UNKNOWN:
			case EVT_FLUSHED:
				ack = ACKX_SEND_ERROR;
				break;
			default:
				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				BUG();
			}
		}

		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ack);

		if (datasize) {
			/* NOTE(review): cpu_to_le32() on a value read *from*
			 * the LE descriptor looks direction-reversed (should
			 * be le32_to_cpu); byte-identical swap, so behavior
			 * is the same -- sparse annotation issue only. */
			pci_unmap_single(ohci->dev,
					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
					 datasize, PCI_DMA_TODEVICE);
			OHCI_DMA_FREE("single Xmit data packet");
		}

		d->sent_ind = (d->sent_ind+1)%d->num_desc;
		d->free_prgs++;
	}

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);
}
/* Stop a receive DMA context that was actually brought up (ctrlClear set).
 * For the legacy iso context also mask its interrupt and unregister the
 * shared legacy tasklet; async contexts just kill their own tasklet. */
static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
{
	if (d->ctrlClear) {
		ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);

		if (d->type == DMA_CTX_ISO) {
			/* disable interrupts */
			reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
			ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
		} else {
			tasklet_kill(&d->task);
		}
	}
}
/* Free everything alloc_dma_rcv_ctx() allocated.  Safe to call on a
 * partially-initialized context (used as the error path of the
 * allocator): each resource is checked before being freed, and the
 * context is marked freed by clearing d->ohci so a second call is a
 * no-op. */
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
{
	int i;
	struct ti_ohci *ohci = d->ohci;

	if (ohci == NULL)
		return;

	DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);

	if (d->buf_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->buf_cpu[i] && d->buf_bus[i]) {
				pci_free_consistent(
					ohci->dev, d->buf_size,
					d->buf_cpu[i], d->buf_bus[i]);
				OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
			}
		kfree(d->buf_cpu);
		kfree(d->buf_bus);
	}
	if (d->prg_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->prg_cpu[i] && d->prg_bus[i]) {
				pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
				OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
			}
		pci_pool_destroy(d->prg_pool);
		OHCI_DMA_FREE("dma_rcv prg pool");
		kfree(d->prg_cpu);
		kfree(d->prg_bus);
	}
	kfree(d->spb);

	/* Mark this context as freed. */
	d->ohci = NULL;
}
2942 static int
2943 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2944 enum context_type type, int ctx, int num_desc,
2945 int buf_size, int split_buf_size, int context_base)
2947 int i, len;
2948 static int num_allocs;
2949 static char pool_name[20];
2951 d->ohci = ohci;
2952 d->type = type;
2953 d->ctx = ctx;
2955 d->num_desc = num_desc;
2956 d->buf_size = buf_size;
2957 d->split_buf_size = split_buf_size;
2959 d->ctrlSet = 0;
2960 d->ctrlClear = 0;
2961 d->cmdPtr = 0;
2963 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2964 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2966 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2967 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2968 free_dma_rcv_ctx(d);
2969 return -ENOMEM;
2972 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2973 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2975 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2976 PRINT(KERN_ERR, "Failed to allocate dma prg");
2977 free_dma_rcv_ctx(d);
2978 return -ENOMEM;
2981 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2983 if (d->spb == NULL) {
2984 PRINT(KERN_ERR, "Failed to allocate split buffer");
2985 free_dma_rcv_ctx(d);
2986 return -ENOMEM;
2989 len = sprintf(pool_name, "ohci1394_rcv_prg");
2990 sprintf(pool_name+len, "%d", num_allocs);
2991 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2992 sizeof(struct dma_cmd), 4, 0);
2993 if(d->prg_pool == NULL)
2995 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2996 free_dma_rcv_ctx(d);
2997 return -ENOMEM;
2999 num_allocs++;
3001 OHCI_DMA_ALLOC("dma_rcv prg pool");
3003 for (i=0; i<d->num_desc; i++) {
3004 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3005 d->buf_size,
3006 d->buf_bus+i);
3007 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3009 if (d->buf_cpu[i] != NULL) {
3010 memset(d->buf_cpu[i], 0, d->buf_size);
3011 } else {
3012 PRINT(KERN_ERR,
3013 "Failed to allocate dma buffer");
3014 free_dma_rcv_ctx(d);
3015 return -ENOMEM;
3018 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
3019 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3021 if (d->prg_cpu[i] != NULL) {
3022 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3023 } else {
3024 PRINT(KERN_ERR,
3025 "Failed to allocate dma prg");
3026 free_dma_rcv_ctx(d);
3027 return -ENOMEM;
3031 spin_lock_init(&d->lock);
3033 if (type == DMA_CTX_ISO) {
3034 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3035 OHCI_ISO_MULTICHANNEL_RECEIVE,
3036 dma_rcv_tasklet, (unsigned long) d);
3037 } else {
3038 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3039 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3040 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3042 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3045 return 0;
/* Free everything alloc_dma_trm_ctx() allocated.  Safe on a partially
 * initialized context (error path of the allocator); clearing d->ohci
 * marks the context freed so a repeat call is a no-op. */
static void free_dma_trm_ctx(struct dma_trm_ctx *d)
{
	int i;
	struct ti_ohci *ohci = d->ohci;

	if (ohci == NULL)
		return;

	DBGMSG("Freeing dma_trm_ctx %d", d->ctx);

	if (d->prg_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->prg_cpu[i] && d->prg_bus[i]) {
				pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
				OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
			}
		pci_pool_destroy(d->prg_pool);
		OHCI_DMA_FREE("dma_trm prg pool");
		kfree(d->prg_cpu);
		kfree(d->prg_bus);
	}

	/* Mark this context as freed. */
	d->ohci = NULL;
}
3074 static int
3075 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3076 enum context_type type, int ctx, int num_desc,
3077 int context_base)
3079 int i, len;
3080 static char pool_name[20];
3081 static int num_allocs=0;
3083 d->ohci = ohci;
3084 d->type = type;
3085 d->ctx = ctx;
3086 d->num_desc = num_desc;
3087 d->ctrlSet = 0;
3088 d->ctrlClear = 0;
3089 d->cmdPtr = 0;
3091 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3092 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3094 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3095 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3096 free_dma_trm_ctx(d);
3097 return -ENOMEM;
3100 len = sprintf(pool_name, "ohci1394_trm_prg");
3101 sprintf(pool_name+len, "%d", num_allocs);
3102 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3103 sizeof(struct at_dma_prg), 4, 0);
3104 if (d->prg_pool == NULL) {
3105 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3106 free_dma_trm_ctx(d);
3107 return -ENOMEM;
3109 num_allocs++;
3111 OHCI_DMA_ALLOC("dma_rcv prg pool");
3113 for (i = 0; i < d->num_desc; i++) {
3114 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
3115 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3117 if (d->prg_cpu[i] != NULL) {
3118 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3119 } else {
3120 PRINT(KERN_ERR,
3121 "Failed to allocate at dma prg");
3122 free_dma_trm_ctx(d);
3123 return -ENOMEM;
3127 spin_lock_init(&d->lock);
3129 /* initialize tasklet */
3130 if (type == DMA_CTX_ISO) {
3131 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3132 dma_trm_tasklet, (unsigned long) d);
3133 if (ohci1394_register_iso_tasklet(ohci,
3134 &ohci->it_legacy_tasklet) < 0) {
3135 PRINT(KERN_ERR, "No IT DMA context available");
3136 free_dma_trm_ctx(d);
3137 return -EBUSY;
3140 /* IT can be assigned to any context by register_iso_tasklet */
3141 d->ctx = ohci->it_legacy_tasklet.context;
3142 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3143 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3144 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3145 } else {
3146 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3147 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3148 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3149 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3152 return 0;
/* Install a new config ROM: program the two live registers the
 * controller serves autonomously (ROM header and BusOptions), then
 * update the DMA-visible shadow copy the controller reads the rest of
 * the ROM from. */
static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
{
	struct ti_ohci *ohci = host->hostdata;

	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));

	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
}
/* Perform a hardware compare-and-swap on one of the controller-handled
 * CSR registers (selected by the low two bits of 'reg'), polling up to
 * OHCI_LOOP_COUNT ms for the csrDone bit (0x80000000).  Returns the old
 * register value as latched in CSRData. */
static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
                                 quadlet_t data, quadlet_t compare)
{
	struct ti_ohci *ohci = host->hostdata;
	int i;

	reg_write(ohci, OHCI1394_CSRData, data);
	reg_write(ohci, OHCI1394_CSRCompareData, compare);
	reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
			break;

		mdelay(1);
	}

	return reg_read(ohci, OHCI1394_CSRData);
}
/* Operations table handed to the ieee1394 core; the core invokes these
 * for config-ROM updates, packet transmit, device/bus control, iso
 * control and hardware CSR access on this host. */
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =		ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};
3196 /***********************************
3197 * PCI Driver Interface functions *
3198 ***********************************/
/* Probe-path error exit: log the message, tear down whatever has been
 * initialized so far (ohci1394_pci_remove unwinds by init_state), and
 * return 'err' from the enclosing function.  Relies on a local variable
 * named 'dev' being in scope at the call site. */
#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
	ohci1394_pci_remove(dev);		\
	return err;				\
} while (0)
/*
 * ohci1394_pci_probe - one-time bring-up of a newly discovered OHCI card.
 *
 * Enables the PCI device, maps the register window (hardwired to 2kB per
 * the OHCI spec), allocates the config-ROM and self-ID DMA buffers, the
 * four async DMA contexts and the legacy IR context, requests the shared
 * IRQ, initializes the controller and finally registers the host with the
 * ieee1394 core.
 *
 * ohci->init_state records how far setup has progressed; FAIL() routes
 * every error through ohci1394_pci_remove(), which unwinds exactly the
 * completed stages.  Returns 0 on success, negative errno on failure.
 */
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to currently handled device */
	resource_size_t ohci_base;

#ifdef CONFIG_PPC_PMAC
	/* Necessary on some machines if ohci1394 was loaded/ unloaded before */
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
#endif /* CONFIG_PPC_PMAC */

	if (pci_enable_device(dev))
		FAIL(-ENXIO, "Failed to enable OHCI hardware");
	/* Bus mastering is required for the controller's DMA engines. */
	pci_set_master(dev);

	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");

	/* hostdata is our per-controller state embedded in the host. */
	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);

	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not order the selfid
	 * properly, so we make up for it here. */
#ifndef __LITTLE_ENDIAN
	/* XXX: Need a better way to check this. I'm wondering if we can
	 * read the values of the OHCI1394_PCI_HCI_Control and the
	 * noByteSwapData registers to see if they were not cleared to
	 * zero. Should this work? Obviously it's not defined what these
	 * registers will read when they aren't supported. Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif


#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a busreset. */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;

	/* We hardwire the MMIO length, since some CardBus adaptors
	 * fail to report the right length.  Anyway, the ohci spec
	 * clearly says it's 2kb, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
		      (unsigned long long)pci_resource_len(dev, 0));

	if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
				OHCI1394_DRIVER_NAME))
		FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
			(unsigned long long)ohci_base,
			(unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL)
		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);

	/* csr_config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	OHCI_DMA_ALLOC("consistent csr_config_rom");
	if (ohci->csr_config_rom_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				     &ohci->selfid_buf_bus);
	OHCI_DMA_ALLOC("consistent selfid_buf");

	if (ohci->selfid_buf_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	/* Warn only; low 13 address bits set means the buffer is not
	 * 8kB-aligned, which some CXD3222 chips reportedly dislike. */
	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
		      "8Kb boundary... may cause problems on some CXD3222 chip",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Req context");

	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Resp context");

	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Req context");

	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Resp context");

	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
	spin_lock_init(&ohci->IR_channel_lock);

	/* Allocate the IR DMA context right here so we don't have
	 * to do it in interrupt path - note that this doesn't
	 * waste much memory and avoids the jugglery required to
	 * allocate it in IRQ path. */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
			      DMA_CTX_ISO, 0, IR_NUM_DESC,
			      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
			      OHCI1394_IsoRcvContextBase) < 0) {
		FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
	}

	/* We hopefully don't have to pre-allocate IT DMA like we did
	 * for IR DMA above. Allocate it on-demand and mark inactive. */
	ohci->it_legacy_context.ohci = NULL;
	spin_lock_init(&ohci->event_lock);

	/*
	 * interrupts are disabled, all right, but... due to IRQF_SHARED we
	 * might get called anyway.  We'll see no event, of course, but
	 * we need to get to that "no event", so enough should be initialized
	 * by that point.
	 */
	/* NOTE(review): -ENOMEM is reported even though request_irq() can
	 * fail for reasons other than memory exhaustion. */
	if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
			 OHCI1394_DRIVER_NAME, ohci))
		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);

	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Set certain csr values */
	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
	host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;

	if (phys_dma) {
		host->low_addr_space =
			(u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
		if (!host->low_addr_space)
			host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
	}
	host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;

	/* Tell the highlevel this host is ready */
	if (hpsb_add_host(host))
		FAIL(-ENOMEM, "Failed to register host with highlevel");

	ohci->init_state = OHCI_INIT_DONE;

	return 0;
#undef FAIL
}
/*
 * ohci1394_pci_remove - tear down a controller.
 *
 * Also serves as the error-unwind path of ohci1394_pci_probe() (via the
 * FAIL() macro).  The switch cascades deliberately: execution enters at
 * the stage recorded in ohci->init_state and falls through all earlier
 * cases, undoing probe's work in reverse order.
 */
static void ohci1394_pci_remove(struct pci_dev *pdev)
{
	struct ti_ohci *ohci;
	struct device *dev;

	ohci = pci_get_drvdata(pdev);
	if (!ohci)
		return;

	/* Hold a reference on the host device until teardown is done. */
	dev = get_device(&ohci->host->device);

	switch (ohci->init_state) {
	case OHCI_INIT_DONE:
		hpsb_remove_host(ohci->host);

		/* Clear out BUS Options */
		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
			  0x00ff0000);
		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
		/* fall through */

	case OHCI_INIT_HAVE_IRQ:
		/* Clear interrupt registers */
		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

		/* Disable IRM Contender */
		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));

		/* Clear link control register */
		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

		/* Let all other nodes know to ignore us */
		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

		/* Soft reset before we start - this disables
		 * interrupts and clears linkEnable and LPS. */
		ohci_soft_reset(ohci);
		free_irq(ohci->dev->irq, ohci);
		/* fall through */

	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
		/* The ohci_soft_reset() stops all DMA contexts, so we
		 * dont need to do this. */
		free_dma_rcv_ctx(&ohci->ar_req_context);
		free_dma_rcv_ctx(&ohci->ar_resp_context);
		free_dma_trm_ctx(&ohci->at_req_context);
		free_dma_trm_ctx(&ohci->at_resp_context);
		free_dma_rcv_ctx(&ohci->ir_legacy_context);
		free_dma_trm_ctx(&ohci->it_legacy_context);
		/* fall through */

	case OHCI_INIT_HAVE_SELFID_BUFFER:
		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				    ohci->selfid_buf_cpu,
				    ohci->selfid_buf_bus);
		OHCI_DMA_FREE("consistent selfid_buf");
		/* fall through */

	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				    ohci->csr_config_rom_cpu,
				    ohci->csr_config_rom_bus);
		OHCI_DMA_FREE("consistent csr_config_rom");
		/* fall through */

	case OHCI_INIT_HAVE_IOMAPPING:
		iounmap(ohci->registers);
		/* fall through */

	case OHCI_INIT_HAVE_MEM_REGION:
		release_mem_region(pci_resource_start(ohci->dev, 0),
				   OHCI1394_REGISTER_SIZE);

#ifdef CONFIG_PPC_PMAC
	/* On UniNorth, power down the cable and turn off the chip clock
	 * to save power on laptops */
	if (machine_is(powermac)) {
		struct device_node* ofn = pci_device_to_OF_node(ohci->dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
#endif /* CONFIG_PPC_PMAC */
		/* fall through */

	case OHCI_INIT_ALLOC_HOST:
		pci_set_drvdata(ohci->dev, NULL);
	}

	if (dev)
		put_device(dev);
}
#ifdef CONFIG_PM
/*
 * ohci1394_pci_suspend - quiesce the controller for a system sleep.
 *
 * Resets the bus, masks every interrupt source and soft-resets the chip
 * using (almost) the same register sequence as the teardown cases in
 * ohci1394_pci_remove(), then saves PCI config space and enters the
 * requested low-power state.  Returns 0 on success, negative errno if
 * the host is missing or pci_save_state() fails.
 */
static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int err;
	struct ti_ohci *ohci = pci_get_drvdata(pdev);

	if (!ohci) {
		printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
		       OHCI1394_DRIVER_NAME);
		return -ENXIO;
	}
	DBGMSG("suspend called");

	/* Clear the async DMA contexts and stop using the controller */
	hpsb_bus_reset(ohci->host);

	/* See ohci1394_pci_remove() for comments on this sequence */
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
		  0x00ff0000);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
	set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
	ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
	ohci_soft_reset(ohci);

	err = pci_save_state(pdev);
	if (err) {
		PRINT(KERN_ERR, "pci_save_state failed with %d", err);
		return err;
	}
	/* A failed power-state change is logged but deliberately not
	 * treated as fatal here. */
	err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
	if (err)
		DBGMSG("pci_set_power_state failed with %d", err);

	/* PowerMac suspend code comes last */
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(pdev);

		if (ofn)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
	}
#endif /* CONFIG_PPC_PMAC */

	return 0;
}
/*
 * ohci1394_pci_resume - wake the controller after a system sleep.
 *
 * Mirrors ohci1394_pci_suspend(): restores PCI state, re-runs the
 * soft-reset / LPS-enable / interrupt-clear sequence from probe, then
 * re-initializes the chip and tells the ieee1394 core the host is back.
 */
static int ohci1394_pci_resume(struct pci_dev *pdev)
{
	int err;
	struct ti_ohci *ohci = pci_get_drvdata(pdev);

	if (!ohci) {
		printk(KERN_ERR "%s: tried to resume nonexisting host\n",
		       OHCI1394_DRIVER_NAME);
		return -ENXIO;
	}
	DBGMSG("resume called");

	/* PowerMac resume code comes first */
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(pdev);

		if (ofn)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
	}
#endif /* CONFIG_PPC_PMAC */

	/* NOTE(review): return values of pci_set_power_state() and
	 * pci_restore_state() are ignored here — confirm this is intended. */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
		return err;
	}

	/* See ohci1394_pci_probe() for comments on this sequence */
	ohci_soft_reset(ohci);
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
	mdelay(50);	/* give the link time to come up after enabling LPS */
	ohci_initialize(ohci);

	hpsb_resume_host(ohci->host);
	return 0;
}
#endif /* CONFIG_PM */
/* Match any PCI device advertising the OHCI-compliant FireWire class
 * code, regardless of vendor/device/subsystem IDs. */
static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class =	PCI_CLASS_SERIAL_FIREWIRE_OHCI,
		.class_mask =	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },		/* terminator */
};

MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
/* PCI core glue: binds the probe/remove (and, with CONFIG_PM, the
 * suspend/resume) callbacks to the device IDs above. */
static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,
#ifdef CONFIG_PM
	.resume =	ohci1394_pci_resume,
	.suspend =	ohci1394_pci_suspend,
#endif
};
3651 /***********************************
3652 * OHCI1394 Video Interface *
3653 ***********************************/
3655 /* essentially the only purpose of this code is to allow another
3656 module to hook into ohci's interrupt handler */
3658 /* returns zero if successful, one if DMA context is locked up */
3659 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3661 int i=0;
3663 /* stop the channel program if it's still running */
3664 reg_write(ohci, reg, 0x8000);
3666 /* Wait until it effectively stops */
3667 while (reg_read(ohci, reg) & 0x400) {
3668 i++;
3669 if (i>5000) {
3670 PRINT(KERN_ERR,
3671 "Runaway loop while stopping context: %s...", msg ? msg : "");
3672 return 1;
3675 mb();
3676 udelay(10);
3678 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3679 return 0;
3682 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3683 void (*func)(unsigned long), unsigned long data)
3685 tasklet_init(&tasklet->tasklet, func, data);
3686 tasklet->type = type;
3687 /* We init the tasklet->link field, so we can list_del() it
3688 * without worrying whether it was added to the list or not. */
3689 INIT_LIST_HEAD(&tasklet->link);
3692 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3693 struct ohci1394_iso_tasklet *tasklet)
3695 unsigned long flags, *usage;
3696 int n, i, r = -EBUSY;
3698 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3699 n = ohci->nb_iso_xmit_ctx;
3700 usage = &ohci->it_ctx_usage;
3702 else {
3703 n = ohci->nb_iso_rcv_ctx;
3704 usage = &ohci->ir_ctx_usage;
3706 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3707 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3708 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3709 return r;
3714 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3716 for (i = 0; i < n; i++)
3717 if (!test_and_set_bit(i, usage)) {
3718 tasklet->context = i;
3719 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3720 r = 0;
3721 break;
3724 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3726 return r;
3729 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3730 struct ohci1394_iso_tasklet *tasklet)
3732 unsigned long flags;
3734 tasklet_kill(&tasklet->tasklet);
3736 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3738 if (tasklet->type == OHCI_ISO_TRANSMIT)
3739 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3740 else {
3741 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3743 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3744 clear_bit(0, &ohci->ir_multichannel_used);
3748 list_del(&tasklet->link);
3750 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
/* Exported so other modules can hook into this driver's iso interrupt
 * handling (see the comment at the top of this section). */
EXPORT_SYMBOL(ohci1394_stop_context);
EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
/***********************************
 * General module initialization   *
 ***********************************/

MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
MODULE_LICENSE("GPL");
/* Module exit: unregister the PCI driver; the PCI core then calls
 * ohci1394_pci_remove() for every bound controller. */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}
/* Module init: register with the PCI core; probing of matching devices
 * happens via ohci1394_pci_probe(). */
static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}
/* Register before most other device drivers.
 * Useful for remote debugging via physical DMA, e.g. using firescope.
 * (fs_initcall runs earlier than the usual module_init level.) */
fs_initcall(ohci1394_init);
module_exit(ohci1394_cleanup);