drivers/ieee1394/ohci1394.c
1 /*
2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
28 * . Iso Receive
29 * . DMA mmap for iso receive
30 * . Config ROM generation
32 * Things implemented, but still in test phase:
33 * . Iso Transmit
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
36 * Things not implemented:
37 * . DMA error recovery
39 * Known bugs:
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
45 * Acknowledgments:
47 * Adam J Richter <adam@yggdrasil.com>
48 * . Use of pci_class to find device
50 * Emilie Chung <emilie.chung@axis.com>
51 * . Tip on Async Request Filter
53 * Pascal Drolet <pascal.drolet@informission.ca>
54 * . Various tips for optimization and functionalities
56 * Robert Ficklin <rficklin@westengineering.com>
57 * . Loop in irq_handler
59 * James Goodwin <jamesg@Filanet.com>
60 * . Various tips on initialization, self-id reception, etc.
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 * . Apple PowerBook detection
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 * . Reset the board properly before leaving + misc cleanups
68 * Leon van Stuivenberg <leonvs@iae.nl>
69 * . Bug fixes
71 * Ben Collins <bcollins@debian.org>
72 * . Working big-endian support
73 * . Updated to 2.4.x module scheme (PCI as well)
74 * . Config ROM generation
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 * . Reworked code for initiating bus resets
78 * (long, short, with or without hold-off)
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 * . Added support for nVidia nForce2 onboard Firewire chipset
85 #include <linux/config.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/slab.h>
89 #include <linux/interrupt.h>
90 #include <linux/wait.h>
91 #include <linux/errno.h>
92 #include <linux/module.h>
93 #include <linux/moduleparam.h>
94 #include <linux/pci.h>
95 #include <linux/fs.h>
96 #include <linux/poll.h>
97 #include <asm/byteorder.h>
98 #include <asm/atomic.h>
99 #include <asm/uaccess.h>
100 #include <linux/delay.h>
101 #include <linux/spinlock.h>
103 #include <asm/pgtable.h>
104 #include <asm/page.h>
105 #include <asm/irq.h>
106 #include <linux/sched.h>
107 #include <linux/types.h>
108 #include <linux/vmalloc.h>
109 #include <linux/init.h>
111 #ifdef CONFIG_PPC_PMAC
112 #include <asm/machdep.h>
113 #include <asm/pmac_feature.h>
114 #include <asm/prom.h>
115 #include <asm/pci-bridge.h>
116 #endif
118 #include "csr1212.h"
119 #include "ieee1394.h"
120 #include "ieee1394_types.h"
121 #include "hosts.h"
122 #include "dma.h"
123 #include "iso.h"
124 #include "ieee1394_core.h"
125 #include "highlevel.h"
126 #include "ohci1394.h"
128 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
129 #define OHCI1394_DEBUG
130 #endif
132 #ifdef DBGMSG
133 #undef DBGMSG
134 #endif
136 #ifdef OHCI1394_DEBUG
137 #define DBGMSG(fmt, args...) \
138 printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
139 #else
140 #define DBGMSG(fmt, args...)
141 #endif
143 #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
144 #define OHCI_DMA_ALLOC(fmt, args...) \
145 HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
146 ++global_outstanding_dmas, ## args)
147 #define OHCI_DMA_FREE(fmt, args...) \
148 HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
149 --global_outstanding_dmas, ## args)
150 static int global_outstanding_dmas = 0;
151 #else
152 #define OHCI_DMA_ALLOC(fmt, args...)
153 #define OHCI_DMA_FREE(fmt, args...)
154 #endif
156 /* print general (card independent) information */
157 #define PRINT_G(level, fmt, args...) \
158 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
160 /* print card specific information */
161 #define PRINT(level, fmt, args...) \
162 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
164 static char version[] __devinitdata =
165 "$Rev: 1313 $ Ben Collins <bcollins@debian.org>";
167 /* Module Parameters */
168 static int phys_dma = 1;
169 module_param(phys_dma, int, 0644);
170 MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
172 static void dma_trm_tasklet(unsigned long data);
173 static void dma_trm_reset(struct dma_trm_ctx *d);
175 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
176 enum context_type type, int ctx, int num_desc,
177 int buf_size, int split_buf_size, int context_base);
178 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
179 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
181 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
182 enum context_type type, int ctx, int num_desc,
183 int context_base);
185 static void ohci1394_pci_remove(struct pci_dev *pdev);
187 #ifndef __LITTLE_ENDIAN
188 static unsigned hdr_sizes[] =
190 3, /* TCODE_WRITEQ */
191 4, /* TCODE_WRITEB */
192 3, /* TCODE_WRITE_RESPONSE */
193 0, /* ??? */
194 3, /* TCODE_READQ */
195 4, /* TCODE_READB */
196 3, /* TCODE_READQ_RESPONSE */
197 4, /* TCODE_READB_RESPONSE */
198 1, /* TCODE_CYCLE_START (???) */
199 4, /* TCODE_LOCK_REQUEST */
200 2, /* TCODE_ISO_DATA */
201 4, /* TCODE_LOCK_RESPONSE */
204 /* Swap headers */
205 static inline void packet_swab(quadlet_t *data, int tcode)
207 size_t size = (tcode > TCODE_LOCK_RESPONSE) ? 0 : hdr_sizes[tcode]; /* don't index past the table */
209 if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
210 return;
212 while (size--)
213 data[size] = swab32(data[size]);
215 #else
216 /* Don't waste cycles on same sex byte swaps */
217 #define packet_swab(w,x)
218 #endif /* !LITTLE_ENDIAN */
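/*
 * Illustrative note (not from the original source): for a TCODE_WRITEQ
 * request, hdr_sizes[] above says the header is 3 quadlets long, so on a
 * big-endian host packet_swab() byte-swaps data[0..2] in place, e.g.
 * 0x12345678 becomes 0x78563412; on little-endian hosts the macro variant
 * compiles away entirely.
 */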
220 /***********************************
221 * IEEE-1394 functionality section *
222 ***********************************/
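/*
 * Summary of the PhyControl register bits as used by the helpers below
 * (paraphrased from the OHCI specification; consult the spec for the
 * authoritative layout): the PHY register address goes in bits 11:8
 * (hence "addr << 8"), write data in bits 7:0, bit 14 (0x00004000)
 * requests a write, bit 15 (0x00008000) requests a read, bit 31
 * (0x80000000) signals read completion, and the read data comes back in
 * bits 23:16, which is why get_phy_reg() returns (r & 0x00ff0000) >> 16.
 */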
224 static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
226 int i;
227 unsigned long flags;
228 quadlet_t r;
230 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
232 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
234 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
235 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
236 break;
238 mdelay(1);
241 r = reg_read(ohci, OHCI1394_PhyControl);
243 if (i >= OHCI_LOOP_COUNT)
244 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
245 r, r & 0x80000000, i);
247 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
249 return (r & 0x00ff0000) >> 16;
252 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
254 int i;
255 unsigned long flags;
256 u32 r = 0;
258 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
260 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
262 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
263 r = reg_read(ohci, OHCI1394_PhyControl);
264 if (!(r & 0x00004000))
265 break;
267 mdelay(1);
270 if (i == OHCI_LOOP_COUNT)
271 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
272 r, r & 0x00004000, i);
274 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
276 return;
279 /* ORs our value into the current value */
280 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
282 u8 old;
284 old = get_phy_reg (ohci, addr);
285 old |= data;
286 set_phy_reg (ohci, addr, old);
288 return;
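/*
 * Notes on the self-ID handling below (derived from how this code uses
 * the registers; see the OHCI spec for the formal definitions): bit 31
 * of SelfIDCount flags a reception error, bits 23:16 carry the self-ID
 * generation (compared against the generation stored in the first buffer
 * quadlet), and the 0x00001FFC field gives the buffer length in
 * quadlets, one of which is that header quadlet.  Each self-ID packet
 * then occupies two quadlets, the packet followed by its bit inverse,
 * which is why the loop checks q0 == ~q1 and advances by two.
 */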
291 static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
292 int phyid, int isroot)
294 quadlet_t *q = ohci->selfid_buf_cpu;
295 quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
296 size_t size;
297 quadlet_t q0, q1;
299 /* Check status of self-id reception */
301 if (ohci->selfid_swap)
302 q0 = le32_to_cpu(q[0]);
303 else
304 q0 = q[0];
306 if ((self_id_count & 0x80000000) ||
307 ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
308 PRINT(KERN_ERR,
309 "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
310 self_id_count, q0, ohci->self_id_errors);
312 /* Tip by James Goodwin <jamesg@Filanet.com>:
313 * We had an error, generate another bus reset in response. */
314 if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
315 set_phy_reg_mask (ohci, 1, 0x40);
316 ohci->self_id_errors++;
317 } else {
318 PRINT(KERN_ERR,
319 "Too many errors on SelfID error reception, giving up!");
321 return;
324 /* SelfID Ok, reset error counter. */
325 ohci->self_id_errors = 0;
327 size = ((self_id_count & 0x00001FFC) >> 2) - 1;
328 q++;
330 while (size > 0) {
331 if (ohci->selfid_swap) {
332 q0 = le32_to_cpu(q[0]);
333 q1 = le32_to_cpu(q[1]);
334 } else {
335 q0 = q[0];
336 q1 = q[1];
339 if (q0 == ~q1) {
340 DBGMSG ("SelfID packet 0x%x received", q0);
341 hpsb_selfid_received(host, cpu_to_be32(q0));
342 if (((q0 & 0x3f000000) >> 24) == phyid)
343 DBGMSG ("SelfID for this node is 0x%08x", q0);
344 } else {
345 PRINT(KERN_ERR,
346 "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
348 q += 2;
349 size -= 2;
352 DBGMSG("SelfID complete");
354 return;
357 static void ohci_soft_reset(struct ti_ohci *ohci) {
358 int i;
360 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
362 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
363 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
364 break;
365 mdelay(1);
367 DBGMSG ("Soft reset finished");
371 /* Generate the dma receive prgs and start the context */
372 static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
374 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
375 int i;
377 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
379 for (i=0; i<d->num_desc; i++) {
380 u32 c;
382 c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
383 if (generate_irq)
384 c |= DMA_CTL_IRQ;
386 d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
388 /* End of descriptor list? */
389 if (i + 1 < d->num_desc) {
390 d->prg_cpu[i]->branchAddress =
391 cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
392 } else {
393 d->prg_cpu[i]->branchAddress =
394 cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
397 d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
398 d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
401 d->buf_ind = 0;
402 d->buf_offset = 0;
404 if (d->type == DMA_CTX_ISO) {
405 /* Clear contextControl */
406 reg_write(ohci, d->ctrlClear, 0xffffffff);
408 /* Set bufferFill, isochHeader, multichannel for IR context */
409 reg_write(ohci, d->ctrlSet, 0xd0000000);
411 /* Set the context match register to match on all tags */
412 reg_write(ohci, d->ctxtMatch, 0xf0000000);
414 /* Clear the multi channel mask high and low registers */
415 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
416 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
418 /* Set up isoRecvIntMask to generate interrupts */
419 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
422 /* Tell the controller where the first AR program is */
423 reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
425 /* Run context */
426 reg_write(ohci, d->ctrlSet, 0x00008000);
428 DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
431 /* Initialize the dma transmit context */
432 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
434 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
436 /* Stop the context */
437 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
439 d->prg_ind = 0;
440 d->sent_ind = 0;
441 d->free_prgs = d->num_desc;
442 d->branchAddrPtr = NULL;
443 INIT_LIST_HEAD(&d->fifo_list);
444 INIT_LIST_HEAD(&d->pending_list);
446 if (d->type == DMA_CTX_ISO) {
447 /* enable interrupts */
448 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
451 DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
454 /* Count the number of available iso contexts */
455 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
457 int i,ctx=0;
458 u32 tmp;
460 reg_write(ohci, reg, 0xffffffff);
461 tmp = reg_read(ohci, reg);
463 DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
465 /* Count the number of contexts */
466 for (i=0; i<32; i++) {
467 if (tmp & 1) ctx++;
468 tmp >>= 1;
470 return ctx;
473 /* Global initialization */
474 static void ohci_initialize(struct ti_ohci *ohci)
476 char irq_buf[16];
477 quadlet_t buf;
478 int num_ports, i;
480 spin_lock_init(&ohci->phy_reg_lock);
482 /* Set sane defaults for these undefined bus options */
483 buf = reg_read(ohci, OHCI1394_BusOptions);
484 buf |= 0x60000000; /* Enable CMC and ISC */
485 if (hpsb_disable_irm)
486 buf &= ~0x80000000;
487 else
488 buf |= 0x80000000; /* Enable IRMC */
489 buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
490 buf &= ~0x18000000; /* Disable PMC and BMC */
491 reg_write(ohci, OHCI1394_BusOptions, buf);
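/* The masks above follow the IEEE 1394 bus_info_block layout (as assumed
 * here, not restated in the original comments): bit 31 = irmc,
 * bit 30 = cmc, bit 29 = isc, bit 28 = bmc, bit 27 = pmc,
 * bits 23:16 = cyc_clk_acc and bits 15:12 = max_rec.  So 0x60000000
 * turns on CMC/ISC, 0x18000000 clears BMC/PMC, and 0x00ff0000 zeroes
 * the cycle clock accuracy field. */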
493 /* Set the bus number */
494 reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
496 /* Enable posted writes */
497 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);
499 /* Clear link control register */
500 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
502 /* Enable cycle timer and cycle master and set the IRM
503 * contender bit in our self ID packets if appropriate. */
504 reg_write(ohci, OHCI1394_LinkControlSet,
505 OHCI1394_LinkControl_CycleTimerEnable |
506 OHCI1394_LinkControl_CycleMaster);
507 i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
508 if (hpsb_disable_irm)
509 i &= ~PHY_04_CONTENDER;
510 else
511 i |= PHY_04_CONTENDER;
512 set_phy_reg(ohci, 4, i);
514 /* Set up self-id dma buffer */
515 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
517 /* enable self-id and phys */
518 reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
519 OHCI1394_LinkControl_RcvPhyPkt);
521 /* Set the Config ROM mapping register */
522 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
524 /* Now get our max packet size */
525 ohci->max_packet_size =
526 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
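/* Worked example (illustrative): a max_rec field of 9 in BusOptions
 * bits 15:12 gives 1 << (9 + 1) = 1024 bytes; the EEPROM sanity check
 * further below accepts 512..4096, i.e. max_rec values 8..11. */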
528 /* Don't accept phy packets into AR request context */
529 reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
531 /* Clear the interrupt mask */
532 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
533 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
535 /* Clear the interrupt mask */
536 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
537 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
539 /* Initialize AR dma */
540 initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
541 initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
543 /* Initialize AT dma */
544 initialize_dma_trm_ctx(&ohci->at_req_context);
545 initialize_dma_trm_ctx(&ohci->at_resp_context);
547 /* Initialize IR Legacy DMA channel mask */
548 ohci->ir_legacy_channels = 0;
551 * Accept AT requests from all nodes. This probably
552 * will have to be controlled from the subsystem
553 * on a per node basis.
555 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
557 /* Specify AT retries */
558 reg_write(ohci, OHCI1394_ATRetries,
559 OHCI1394_MAX_AT_REQ_RETRIES |
560 (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
561 (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
563 /* We don't want hardware swapping */
564 reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
566 /* Enable interrupts */
567 reg_write(ohci, OHCI1394_IntMaskSet,
568 OHCI1394_unrecoverableError |
569 OHCI1394_masterIntEnable |
570 OHCI1394_busReset |
571 OHCI1394_selfIDComplete |
572 OHCI1394_RSPkt |
573 OHCI1394_RQPkt |
574 OHCI1394_respTxComplete |
575 OHCI1394_reqTxComplete |
576 OHCI1394_isochRx |
577 OHCI1394_isochTx |
578 OHCI1394_cycleInconsistent);
580 /* Enable link */
581 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
583 buf = reg_read(ohci, OHCI1394_Version);
584 #ifndef __sparc__
585 sprintf (irq_buf, "%d", ohci->dev->irq);
586 #else
587 sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
588 #endif
589 PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
590 "MMIO=[%lx-%lx] Max Packet=[%d]",
591 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
592 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
593 pci_resource_start(ohci->dev, 0),
594 pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
595 ohci->max_packet_size);
597 /* Check all of our ports to make sure that if anything is
598 * connected, we enable that port. */
599 num_ports = get_phy_reg(ohci, 2) & 0xf;
600 for (i = 0; i < num_ports; i++) {
601 unsigned int status;
603 set_phy_reg(ohci, 7, i);
604 status = get_phy_reg(ohci, 8);
606 if (status & 0x20)
607 set_phy_reg(ohci, 8, status & ~1);
610 /* Serial EEPROM Sanity check. */
611 if ((ohci->max_packet_size < 512) ||
612 (ohci->max_packet_size > 4096)) {
613 /* Serial EEPROM contents are suspect, set a sane max packet
614 * size and print the raw contents for bug reports if verbose
615 * debug is enabled. */
616 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
617 int i;
618 #endif
620 PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
621 "attempting to setting max_packet_size to 512 bytes");
622 reg_write(ohci, OHCI1394_BusOptions,
623 (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
624 ohci->max_packet_size = 512;
625 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
626 PRINT(KERN_DEBUG, " EEPROM Present: %d",
627 (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
628 reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);
630 for (i = 0;
631 ((i < 1000) &&
632 (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
633 udelay(10);
635 for (i = 0; i < 0x20; i++) {
636 reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
637 PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
638 (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
640 #endif
645 * Insert a packet in the DMA fifo and generate the DMA prg
646 * FIXME: rewrite the program in order to accept packets crossing
647 * page boundaries.
648 * check also that a single dma descriptor doesn't cross a
649 * page boundary.
651 static void insert_packet(struct ti_ohci *ohci,
652 struct dma_trm_ctx *d, struct hpsb_packet *packet)
654 u32 cycleTimer;
655 int idx = d->prg_ind;
657 DBGMSG("Inserting packet for node " NODE_BUS_FMT
658 ", tlabel=%d, tcode=0x%x, speed=%d",
659 NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
660 packet->tcode, packet->speed_code);
662 d->prg_cpu[idx]->begin.address = 0;
663 d->prg_cpu[idx]->begin.branchAddress = 0;
665 if (d->type == DMA_CTX_ASYNC_RESP) {
667 * For response packets, we need to put a timeout value in
668 * the 16 lower bits of the status... let's try 1 sec timeout
670 cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
671 d->prg_cpu[idx]->begin.status = cpu_to_le32(
672 (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
673 ((cycleTimer&0x01fff000)>>12));
675 DBGMSG("cycleTimer: %08x timeStamp: %08x",
676 cycleTimer, d->prg_cpu[idx]->begin.status);
677 } else
678 d->prg_cpu[idx]->begin.status = 0;
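/* Illustrative arithmetic for the response timeout stamp above: if the
 * current cycleSeconds is 3 and cycleCount is 1000, the stamp becomes
 * (((3 + 1) & 0x7) << 13) | 1000 = 0x8000 | 0x3e8 = 0x83e8, i.e. "give
 * up one cycleSeconds tick (roughly one second) from now". */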
680 if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
682 if (packet->type == hpsb_raw) {
683 d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
684 d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
685 d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
686 } else {
687 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
688 (packet->header[0] & 0xFFFF);
690 if (packet->tcode == TCODE_ISO_DATA) {
691 /* Sending an async stream packet */
692 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
693 } else {
694 /* Sending a normal async request or response */
695 d->prg_cpu[idx]->data[1] =
696 (packet->header[1] & 0xFFFF) |
697 (packet->header[0] & 0xFFFF0000);
698 d->prg_cpu[idx]->data[2] = packet->header[2];
699 d->prg_cpu[idx]->data[3] = packet->header[3];
701 packet_swab(d->prg_cpu[idx]->data, packet->tcode);
704 if (packet->data_size) { /* block transmit */
705 if (packet->tcode == TCODE_STREAM_DATA){
706 d->prg_cpu[idx]->begin.control =
707 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
708 DMA_CTL_IMMEDIATE | 0x8);
709 } else {
710 d->prg_cpu[idx]->begin.control =
711 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
712 DMA_CTL_IMMEDIATE | 0x10);
714 d->prg_cpu[idx]->end.control =
715 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
716 DMA_CTL_IRQ |
717 DMA_CTL_BRANCH |
718 packet->data_size);
720 * Check that the packet data buffer
721 * does not cross a page boundary.
723 * XXX Fix this some day. eth1394 seems to trigger
724 * it, but ignoring it doesn't seem to cause a
725 * problem.
727 #if 0
728 if (cross_bound((unsigned long)packet->data,
729 packet->data_size)>0) {
730 /* FIXME: do something about it */
731 PRINT(KERN_ERR,
732 "%s: packet data addr: %p size %Zd bytes "
733 "cross page boundary", __FUNCTION__,
734 packet->data, packet->data_size);
736 #endif
737 d->prg_cpu[idx]->end.address = cpu_to_le32(
738 pci_map_single(ohci->dev, packet->data,
739 packet->data_size,
740 PCI_DMA_TODEVICE));
741 OHCI_DMA_ALLOC("single, block transmit packet");
743 d->prg_cpu[idx]->end.branchAddress = 0;
744 d->prg_cpu[idx]->end.status = 0;
745 if (d->branchAddrPtr)
746 *(d->branchAddrPtr) =
747 cpu_to_le32(d->prg_bus[idx] | 0x3);
748 d->branchAddrPtr =
749 &(d->prg_cpu[idx]->end.branchAddress);
750 } else { /* quadlet transmit */
751 if (packet->type == hpsb_raw)
752 d->prg_cpu[idx]->begin.control =
753 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
754 DMA_CTL_IMMEDIATE |
755 DMA_CTL_IRQ |
756 DMA_CTL_BRANCH |
757 (packet->header_size + 4));
758 else
759 d->prg_cpu[idx]->begin.control =
760 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
761 DMA_CTL_IMMEDIATE |
762 DMA_CTL_IRQ |
763 DMA_CTL_BRANCH |
764 packet->header_size);
766 if (d->branchAddrPtr)
767 *(d->branchAddrPtr) =
768 cpu_to_le32(d->prg_bus[idx] | 0x2);
769 d->branchAddrPtr =
770 &(d->prg_cpu[idx]->begin.branchAddress);
773 } else { /* iso packet */
774 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
775 (packet->header[0] & 0xFFFF);
776 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
777 packet_swab(d->prg_cpu[idx]->data, packet->tcode);
779 d->prg_cpu[idx]->begin.control =
780 cpu_to_le32(DMA_CTL_OUTPUT_MORE |
781 DMA_CTL_IMMEDIATE | 0x8);
782 d->prg_cpu[idx]->end.control =
783 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
784 DMA_CTL_UPDATE |
785 DMA_CTL_IRQ |
786 DMA_CTL_BRANCH |
787 packet->data_size);
788 d->prg_cpu[idx]->end.address = cpu_to_le32(
789 pci_map_single(ohci->dev, packet->data,
790 packet->data_size, PCI_DMA_TODEVICE));
791 OHCI_DMA_ALLOC("single, iso transmit packet");
793 d->prg_cpu[idx]->end.branchAddress = 0;
794 d->prg_cpu[idx]->end.status = 0;
795 DBGMSG("Iso xmit context info: header[%08x %08x]\n"
796 " begin=%08x %08x %08x %08x\n"
797 " %08x %08x %08x %08x\n"
798 " end =%08x %08x %08x %08x",
799 d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
800 d->prg_cpu[idx]->begin.control,
801 d->prg_cpu[idx]->begin.address,
802 d->prg_cpu[idx]->begin.branchAddress,
803 d->prg_cpu[idx]->begin.status,
804 d->prg_cpu[idx]->data[0],
805 d->prg_cpu[idx]->data[1],
806 d->prg_cpu[idx]->data[2],
807 d->prg_cpu[idx]->data[3],
808 d->prg_cpu[idx]->end.control,
809 d->prg_cpu[idx]->end.address,
810 d->prg_cpu[idx]->end.branchAddress,
811 d->prg_cpu[idx]->end.status);
812 if (d->branchAddrPtr)
813 *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
814 d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
816 d->free_prgs--;
818 /* queue the packet in the appropriate context queue */
819 list_add_tail(&packet->driver_list, &d->fifo_list);
820 d->prg_ind = (d->prg_ind + 1) % d->num_desc;
824 * This function fills the FIFO with the (eventual) pending packets
825 * and runs or wakes up the DMA prg if necessary.
827 * The function MUST be called with the d->lock held.
829 static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
831 struct hpsb_packet *packet, *ptmp;
832 int idx = d->prg_ind;
833 int z = 0;
835 /* insert the packets into the dma fifo */
836 list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
837 if (!d->free_prgs)
838 break;
840 /* For the first packet only */
841 if (!z)
842 z = (packet->data_size) ? 3 : 2;
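/* (z is the Z value encoded into the CommandPtr write below,
 * d->prg_bus[idx] | z: it tells the controller how many 16-byte
 * descriptor components make up the first program block, and a packet
 * with a data payload needs one more component than a header-only
 * packet.) */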
844 /* Insert the packet */
845 list_del_init(&packet->driver_list);
846 insert_packet(ohci, d, packet);
849 /* Nothing was done: either no free_prgs or no pending packets */
850 if (z == 0)
851 return;
853 /* Is the context running ? (should be unless it is
854 the first packet to be sent in this context) */
855 if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
856 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
858 DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
859 reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);
861 /* Check that the node id is valid, and not 63 */
862 if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
863 PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
864 else
865 reg_write(ohci, d->ctrlSet, 0x8000);
866 } else {
867 /* Wake up the dma context if necessary */
868 if (!(reg_read(ohci, d->ctrlSet) & 0x400))
869 DBGMSG("Waking transmit DMA ctx=%d",d->ctx);
871 /* do this always, to avoid race condition */
872 reg_write(ohci, d->ctrlSet, 0x1000);
875 return;
878 /* Transmission of an async or iso packet */
879 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
881 struct ti_ohci *ohci = host->hostdata;
882 struct dma_trm_ctx *d;
883 unsigned long flags;
885 if (packet->data_size > ohci->max_packet_size) {
886 PRINT(KERN_ERR,
887 "Transmit packet size %Zd is too big",
888 packet->data_size);
889 return -EOVERFLOW;
892 /* Decide whether we have an iso, a request, or a response packet */
893 if (packet->type == hpsb_raw)
894 d = &ohci->at_req_context;
895 else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
896 /* The legacy IT DMA context is initialized on first
897 * use. However, the alloc cannot be run from
898 * interrupt context, so we bail out if that is the
899 * case. I don't see anyone sending ISO packets from
900 * interrupt context anyway... */
902 if (ohci->it_legacy_context.ohci == NULL) {
903 if (in_interrupt()) {
904 PRINT(KERN_ERR,
905 "legacy IT context cannot be initialized during interrupt");
906 return -EINVAL;
909 if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
910 DMA_CTX_ISO, 0, IT_NUM_DESC,
911 OHCI1394_IsoXmitContextBase) < 0) {
912 PRINT(KERN_ERR,
913 "error initializing legacy IT context");
914 return -ENOMEM;
917 initialize_dma_trm_ctx(&ohci->it_legacy_context);
920 d = &ohci->it_legacy_context;
921 } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
922 d = &ohci->at_resp_context;
923 else
924 d = &ohci->at_req_context;
926 spin_lock_irqsave(&d->lock,flags);
928 list_add_tail(&packet->driver_list, &d->pending_list);
930 dma_trm_flush(ohci, d);
932 spin_unlock_irqrestore(&d->lock,flags);
934 return 0;
937 static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
939 struct ti_ohci *ohci = host->hostdata;
940 int retval = 0;
941 unsigned long flags;
942 int phy_reg;
944 switch (cmd) {
945 case RESET_BUS:
946 switch (arg) {
947 case SHORT_RESET:
948 phy_reg = get_phy_reg(ohci, 5);
949 phy_reg |= 0x40;
950 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
951 break;
952 case LONG_RESET:
953 phy_reg = get_phy_reg(ohci, 1);
954 phy_reg |= 0x40;
955 set_phy_reg(ohci, 1, phy_reg); /* set IBR */
956 break;
957 case SHORT_RESET_NO_FORCE_ROOT:
958 phy_reg = get_phy_reg(ohci, 1);
959 if (phy_reg & 0x80) {
960 phy_reg &= ~0x80;
961 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
964 phy_reg = get_phy_reg(ohci, 5);
965 phy_reg |= 0x40;
966 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
967 break;
968 case LONG_RESET_NO_FORCE_ROOT:
969 phy_reg = get_phy_reg(ohci, 1);
970 phy_reg &= ~0x80;
971 phy_reg |= 0x40;
972 set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
973 break;
974 case SHORT_RESET_FORCE_ROOT:
975 phy_reg = get_phy_reg(ohci, 1);
976 if (!(phy_reg & 0x80)) {
977 phy_reg |= 0x80;
978 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
981 phy_reg = get_phy_reg(ohci, 5);
982 phy_reg |= 0x40;
983 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
984 break;
985 case LONG_RESET_FORCE_ROOT:
986 phy_reg = get_phy_reg(ohci, 1);
987 phy_reg |= 0xc0;
988 set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
989 break;
990 default:
991 retval = -1;
993 break;
995 case GET_CYCLE_COUNTER:
996 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
997 break;
999 case SET_CYCLE_COUNTER:
1000 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
1001 break;
1003 case SET_BUS_ID:
1004 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
1005 break;
1007 case ACT_CYCLE_MASTER:
1008 if (arg) {
1009 /* check if we are root and other nodes are present */
1010 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
1011 if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1013 * enable cycleTimer, cycleMaster
1015 DBGMSG("Cycle master enabled");
1016 reg_write(ohci, OHCI1394_LinkControlSet,
1017 OHCI1394_LinkControl_CycleTimerEnable |
1018 OHCI1394_LinkControl_CycleMaster);
1020 } else {
1021 /* disable cycleTimer, cycleMaster, cycleSource */
1022 reg_write(ohci, OHCI1394_LinkControlClear,
1023 OHCI1394_LinkControl_CycleTimerEnable |
1024 OHCI1394_LinkControl_CycleMaster |
1025 OHCI1394_LinkControl_CycleSource);
1027 break;
1029 case CANCEL_REQUESTS:
1030 DBGMSG("Cancel request received");
1031 dma_trm_reset(&ohci->at_req_context);
1032 dma_trm_reset(&ohci->at_resp_context);
1033 break;
1035 case ISO_LISTEN_CHANNEL:
1037 u64 mask;
1038 struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
1039 int ir_legacy_active;
1041 if (arg<0 || arg>63) {
1042 PRINT(KERN_ERR,
1043 "%s: IS0 listen channel %d is out of range",
1044 __FUNCTION__, arg);
1045 return -EFAULT;
1048 mask = (u64)0x1<<arg;
1050 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1052 if (ohci->ISO_channel_usage & mask) {
1053 PRINT(KERN_ERR,
1054 "%s: IS0 listen channel %d is already used",
1055 __FUNCTION__, arg);
1056 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1057 return -EFAULT;
1060 ir_legacy_active = ohci->ir_legacy_channels;
1062 ohci->ISO_channel_usage |= mask;
1063 ohci->ir_legacy_channels |= mask;
1065 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1067 if (!ir_legacy_active) {
1068 if (ohci1394_register_iso_tasklet(ohci,
1069 &ohci->ir_legacy_tasklet) < 0) {
1070 PRINT(KERN_ERR, "No IR DMA context available");
1071 return -EBUSY;
1074 /* the IR context can be assigned to any DMA context
1075 * by ohci1394_register_iso_tasklet */
1076 d->ctx = ohci->ir_legacy_tasklet.context;
1077 d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
1078 32*d->ctx;
1079 d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
1080 32*d->ctx;
1081 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
1082 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
1084 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1086 if (printk_ratelimit())
1087 DBGMSG("IR legacy activated");
1090 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1092 if (arg>31)
1093 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1094 1<<(arg-32));
1095 else
1096 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1097 1<<arg);
1099 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1100 DBGMSG("Listening enabled on channel %d", arg);
1101 break;
1103 case ISO_UNLISTEN_CHANNEL:
1105 u64 mask;
1107 if (arg<0 || arg>63) {
1108 PRINT(KERN_ERR,
1109 "%s: IS0 unlisten channel %d is out of range",
1110 __FUNCTION__, arg);
1111 return -EFAULT;
1114 mask = (u64)0x1<<arg;
1116 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1118 if (!(ohci->ISO_channel_usage & mask)) {
1119 PRINT(KERN_ERR,
1120 "%s: IS0 unlisten channel %d is not used",
1121 __FUNCTION__, arg);
1122 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1123 return -EFAULT;
1126 ohci->ISO_channel_usage &= ~mask;
1127 ohci->ir_legacy_channels &= ~mask;
1129 if (arg>31)
1130 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1131 1<<(arg-32));
1132 else
1133 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1134 1<<arg);
1136 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1137 DBGMSG("Listening disabled on channel %d", arg);
1139 if (ohci->ir_legacy_channels == 0) {
1140 stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1141 DBGMSG("ISO legacy receive context stopped");
1144 break;
1146 default:
1147 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1148 cmd);
1149 break;
1151 return retval;
1154 /***********************************
1155 * rawiso ISO reception *
1156 ***********************************/
1159 We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1160 buffer is split into "blocks" (regions described by one DMA
1161 descriptor). Each block must be one page or less in size, and
1162 must not cross a page boundary.
1164 There is one little wrinkle with buffer-fill mode: a packet that
1165 starts in the final block may wrap around into the first block. But
1166 the user API expects all packets to be contiguous. Our solution is
1167 to keep the very last page of the DMA buffer in reserve - if a
1168 packet spans the gap, we copy its tail into this page.
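/* Illustrative example (assumed numbers): with a 64 KB DMA buffer and
 * 4 KB pages there are 15 data blocks plus the reserved guard page; a
 * packet whose header sits near the end of block 14 wraps into block 0,
 * and ohci_iso_recv_bufferfill_parse() memcpy()s the wrapped tail into
 * the guard page so the user still sees one contiguous packet. */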
1171 struct ohci_iso_recv {
1172 struct ti_ohci *ohci;
1174 struct ohci1394_iso_tasklet task;
1175 int task_active;
1177 enum { BUFFER_FILL_MODE = 0,
1178 PACKET_PER_BUFFER_MODE = 1 } dma_mode;
1180 /* memory and PCI mapping for the DMA descriptors */
1181 struct dma_prog_region prog;
1182 struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */
1184 /* how many DMA blocks fit in the buffer */
1185 unsigned int nblocks;
1187 /* stride of DMA blocks */
1188 unsigned int buf_stride;
1190 /* number of blocks to batch between interrupts */
1191 int block_irq_interval;
1193 /* block that DMA will finish next */
1194 int block_dma;
1196 /* (buffer-fill only) block that the reader will release next */
1197 int block_reader;
1199 /* (buffer-fill only) bytes of buffer the reader has released,
1200 less than one block */
1201 int released_bytes;
1203 /* (buffer-fill only) buffer offset at which the next packet will appear */
1204 int dma_offset;
1206 /* OHCI DMA context control registers */
1207 u32 ContextControlSet;
1208 u32 ContextControlClear;
1209 u32 CommandPtr;
1210 u32 ContextMatch;
1213 static void ohci_iso_recv_task(unsigned long data);
1214 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1215 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1216 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1217 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1219 static int ohci_iso_recv_init(struct hpsb_iso *iso)
1221 struct ti_ohci *ohci = iso->host->hostdata;
1222 struct ohci_iso_recv *recv;
1223 int ctx;
1224 int ret = -ENOMEM;
1226 recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
1227 if (!recv)
1228 return -ENOMEM;
1230 iso->hostdata = recv;
1231 recv->ohci = ohci;
1232 recv->task_active = 0;
1233 dma_prog_region_init(&recv->prog);
1234 recv->block = NULL;
1236 /* use buffer-fill mode, unless irq_interval is 1
1237 (note: multichannel requires buffer-fill) */
1239 if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1240 iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1241 recv->dma_mode = PACKET_PER_BUFFER_MODE;
1242 } else {
1243 recv->dma_mode = BUFFER_FILL_MODE;
1246 /* set nblocks, buf_stride, block_irq_interval */
1248 if (recv->dma_mode == BUFFER_FILL_MODE) {
1249 recv->buf_stride = PAGE_SIZE;
1251 /* one block per page of data in the DMA buffer, minus the final guard page */
1252 recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1253 if (recv->nblocks < 3) {
1254 DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1255 goto err;
1258 /* iso->irq_interval is in packets - translate that to blocks */
1259 if (iso->irq_interval == 1)
1260 recv->block_irq_interval = 1;
1261 else
1262 recv->block_irq_interval = iso->irq_interval *
1263 ((recv->nblocks+1)/iso->buf_packets);
1264 if (recv->block_irq_interval*4 > recv->nblocks)
1265 recv->block_irq_interval = recv->nblocks/4;
1266 if (recv->block_irq_interval < 1)
1267 recv->block_irq_interval = 1;
1269 } else {
1270 int max_packet_size;
1272 recv->nblocks = iso->buf_packets;
1273 recv->block_irq_interval = iso->irq_interval;
1274 if (recv->block_irq_interval * 4 > iso->buf_packets)
1275 recv->block_irq_interval = iso->buf_packets / 4;
1276 if (recv->block_irq_interval < 1)
1277 recv->block_irq_interval = 1;
1279 /* choose a buffer stride */
1280 /* must be a power of 2, and <= PAGE_SIZE */
1282 max_packet_size = iso->buf_size / iso->buf_packets;
1284 for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1285 recv->buf_stride *= 2);
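/* Worked example (illustrative): buf_size = 65536 with buf_packets = 64
 * gives max_packet_size = 1024; the loop leaves buf_stride at the
 * smallest power of two (starting from 8) not below that, here exactly
 * 1024, and the check below then verifies 1024 * 64 <= 65536 and
 * 1024 <= PAGE_SIZE (true on a 4 KB page architecture). */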
1287 if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1288 recv->buf_stride > PAGE_SIZE) {
1289 /* this shouldn't happen, but anyway... */
1290 DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
1291 goto err;
1295 recv->block_reader = 0;
1296 recv->released_bytes = 0;
1297 recv->block_dma = 0;
1298 recv->dma_offset = 0;
1300 /* size of DMA program = one descriptor per block */
1301 if (dma_prog_region_alloc(&recv->prog,
1302 sizeof(struct dma_cmd) * recv->nblocks,
1303 recv->ohci->dev))
1304 goto err;
1306 recv->block = (struct dma_cmd*) recv->prog.kvirt;
1308 ohci1394_init_iso_tasklet(&recv->task,
1309 iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1310 OHCI_ISO_RECEIVE,
1311 ohci_iso_recv_task, (unsigned long) iso);
1313 if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
1314 ret = -EBUSY;
1315 goto err;
1318 recv->task_active = 1;
1320 /* recv context registers are spaced 32 bytes apart */
1321 ctx = recv->task.context;
1322 recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1323 recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1324 recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1325 recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1327 if (iso->channel == -1) {
1328 /* clear multi-channel selection mask */
1329 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1330 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1333 /* write the DMA program */
1334 ohci_iso_recv_program(iso);
1336 DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1337 " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1338 recv->dma_mode == BUFFER_FILL_MODE ?
1339 "buffer-fill" : "packet-per-buffer",
1340 iso->buf_size/PAGE_SIZE, iso->buf_size,
1341 recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1343 return 0;
1345 err:
1346 ohci_iso_recv_shutdown(iso);
1347 return ret;
1350 static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1352 struct ohci_iso_recv *recv = iso->hostdata;
1354 /* disable interrupts */
1355 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1357 /* halt DMA */
1358 ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1361 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1363 struct ohci_iso_recv *recv = iso->hostdata;
1365 if (recv->task_active) {
1366 ohci_iso_recv_stop(iso);
1367 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1368 recv->task_active = 0;
1371 dma_prog_region_free(&recv->prog);
1372 kfree(recv);
1373 iso->hostdata = NULL;
1376 /* set up a "gapped" ring buffer DMA program */
1377 static void ohci_iso_recv_program(struct hpsb_iso *iso)
1379 struct ohci_iso_recv *recv = iso->hostdata;
1380 int blk;
1382 /* address of 'branch' field in previous DMA descriptor */
1383 u32 *prev_branch = NULL;
1385 for (blk = 0; blk < recv->nblocks; blk++) {
1386 u32 control;
1388 /* the DMA descriptor */
1389 struct dma_cmd *cmd = &recv->block[blk];
1391 /* offset of the DMA descriptor relative to the DMA prog buffer */
1392 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1394 /* offset of this packet's data within the DMA buffer */
1395 unsigned long buf_offset = blk * recv->buf_stride;
1397 if (recv->dma_mode == BUFFER_FILL_MODE) {
1398 control = 2 << 28; /* INPUT_MORE */
1399 } else {
1400 control = 3 << 28; /* INPUT_LAST */
1403 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1405 /* interrupt on last block, and at intervals */
1406 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1407 control |= 3 << 20; /* want interrupt */
1410 control |= 3 << 18; /* enable branch to address */
1411 control |= recv->buf_stride;
1413 cmd->control = cpu_to_le32(control);
1414 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1415 cmd->branchAddress = 0; /* filled in on next loop */
1416 cmd->status = cpu_to_le32(recv->buf_stride);
1418 /* link the previous descriptor to this one */
1419 if (prev_branch) {
1420 *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1423 prev_branch = &cmd->branchAddress;
1426 /* the final descriptor's branch address and Z should be left at 0 */
1429 /* listen or unlisten to a specific channel (multi-channel mode only) */
1430 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1432 struct ohci_iso_recv *recv = iso->hostdata;
1433 int reg, i;
1435 if (channel < 32) {
1436 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1437 i = channel;
1438 } else {
1439 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1440 i = channel - 32;
1443 reg_write(recv->ohci, reg, (1 << i));
1445 /* issue a dummy read to force all PCI writes to be posted immediately */
1446 mb();
1447 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1450 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1452 struct ohci_iso_recv *recv = iso->hostdata;
1453 int i;
1455 for (i = 0; i < 64; i++) {
1456 if (mask & (1ULL << i)) {
1457 if (i < 32)
1458 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1459 else
1460 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1461 } else {
1462 if (i < 32)
1463 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1464 else
1465 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1469 /* issue a dummy read to force all PCI writes to be posted immediately */
1470 mb();
1471 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1474 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
1476 struct ohci_iso_recv *recv = iso->hostdata;
1477 struct ti_ohci *ohci = recv->ohci;
1478 u32 command, contextMatch;
1480 reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
1481 wmb();
1483 /* always keep ISO headers */
1484 command = (1 << 30);
1486 if (recv->dma_mode == BUFFER_FILL_MODE)
1487 command |= (1 << 31);
1489 reg_write(recv->ohci, recv->ContextControlSet, command);
1491 /* match on specified tags */
1492 contextMatch = tag_mask << 28;
1494 if (iso->channel == -1) {
1495 /* enable multichannel reception */
1496 reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
1497 } else {
1498 /* listen on channel */
1499 contextMatch |= iso->channel;
1502 if (cycle != -1) {
1503 u32 seconds;
1505 /* enable cycleMatch */
1506 reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
1508 /* set starting cycle */
1509 cycle &= 0x1FFF;
1511 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1512 just snarf them from the current time */
1513 seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1515 /* advance one second to give some extra time for DMA to start */
1516 seconds += 1;
1518 cycle |= (seconds & 3) << 13;
1520 contextMatch |= cycle << 12;
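/* Illustrative example: asked to start at cycle 7900 (0x1edc) while the
 * current cycleSeconds is 5, we use seconds + 1 = 6, (6 & 3) = 2, so
 * cycle becomes 0x1edc | (2 << 13) = 0x5edc and lands in contextMatch
 * bits 26:12 via the shift above. */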
1523 if (sync != -1) {
1524 /* set sync flag on first DMA descriptor */
1525 struct dma_cmd *cmd = &recv->block[recv->block_dma];
1526 cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
1528 /* match sync field */
1529 contextMatch |= (sync&0xf)<<8;
1532 reg_write(recv->ohci, recv->ContextMatch, contextMatch);
1534 /* address of first descriptor block */
1535 command = dma_prog_region_offset_to_bus(&recv->prog,
1536 recv->block_dma * sizeof(struct dma_cmd));
1537 command |= 1; /* Z=1 */
1539 reg_write(recv->ohci, recv->CommandPtr, command);
1541 /* enable interrupts */
1542 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
1544 wmb();
1546 /* run */
1547 reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
1549 /* issue a dummy read of the cycle timer register to force
1550 all PCI writes to be posted immediately */
1551 mb();
1552 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1554 /* check RUN */
1555 if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
1556 PRINT(KERN_ERR,
1557 "Error starting IR DMA (ContextControl 0x%08x)\n",
1558 reg_read(recv->ohci, recv->ContextControlSet));
1559 return -1;
1562 return 0;
1565 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1567 /* re-use the DMA descriptor for the block */
1568 /* by linking the previous descriptor to it */
1570 int next_i = block;
1571 int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1573 struct dma_cmd *next = &recv->block[next_i];
1574 struct dma_cmd *prev = &recv->block[prev_i];
1576 /* ignore out-of-range requests */
1577 if ((block < 0) || (block >= recv->nblocks))
1578 return;
1580 /* 'next' becomes the new end of the DMA chain,
1581 so disable branch and enable interrupt */
1582 next->branchAddress = 0;
1583 next->control |= cpu_to_le32(3 << 20);
1584 next->status = cpu_to_le32(recv->buf_stride);
1586 /* link prev to next */
1587 prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1588 sizeof(struct dma_cmd) * next_i)
1589 | 1); /* Z=1 */
1591 /* disable interrupt on previous DMA descriptor, except at intervals */
1592 if ((prev_i % recv->block_irq_interval) == 0) {
1593 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1594 } else {
1595 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1597 wmb();
1599 /* wake up DMA in case it fell asleep */
1600 reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1603 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1604 struct hpsb_iso_packet_info *info)
1606 /* release the memory where the packet was */
1607 recv->released_bytes += info->total_len;
1609 /* have we released enough memory for one block? */
1610 while (recv->released_bytes > recv->buf_stride) {
1611 ohci_iso_recv_release_block(recv, recv->block_reader);
1612 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1613 recv->released_bytes -= recv->buf_stride;
1617 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1619 struct ohci_iso_recv *recv = iso->hostdata;
1620 if (recv->dma_mode == BUFFER_FILL_MODE) {
1621 ohci_iso_recv_bufferfill_release(recv, info);
1622 } else {
1623 ohci_iso_recv_release_block(recv, info - iso->infos);
1627 /* parse all packets from blocks that have been fully received */
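/* Layout of each received packet in the buffer, as parsed here (one
 * reading of OHCI section 10.6.1.1): a 4-byte ISO header with sy in
 * byte 0, tag and channel in byte 1 and the little-endian dataLength in
 * bytes 2-3, then the payload padded to a quadlet boundary, then a
 * 4-byte xferStatus/timeStamp trailer whose low 13 bits hold the cycle. */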
1628 static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1630 int wake = 0;
1631 int runaway = 0;
1632 struct ti_ohci *ohci = recv->ohci;
1634 while (1) {
1635 /* we expect the next parsable packet to begin at recv->dma_offset */
1636 /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
1638 unsigned int offset;
1639 unsigned short len, cycle, total_len;
1640 unsigned char channel, tag, sy;
1642 unsigned char *p = iso->data_buf.kvirt;
1644 unsigned int this_block = recv->dma_offset/recv->buf_stride;
1646 /* don't loop indefinitely */
1647 if (runaway++ > 100000) {
1648 atomic_inc(&iso->overflows);
1649 PRINT(KERN_ERR,
1650 "IR DMA error - Runaway during buffer parsing!\n");
1651 break;
1654 /* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
1655 if (this_block == recv->block_dma)
1656 break;
1658 wake = 1;
1660 /* parse data length, tag, channel, and sy */
1662 /* note: we keep our own local copies of 'len' and 'offset'
1663 so the user can't mess with them by poking in the mmap area */
1665 len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
1667 if (len > 4096) {
1668 PRINT(KERN_ERR,
1669 "IR DMA error - bogus 'len' value %u\n", len);
1672 channel = p[recv->dma_offset+1] & 0x3F;
1673 tag = p[recv->dma_offset+1] >> 6;
1674 sy = p[recv->dma_offset+0] & 0xF;
1676 /* advance to data payload */
1677 recv->dma_offset += 4;
1679 /* check for wrap-around */
1680 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1681 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1684 /* dma_offset now points to the first byte of the data payload */
1685 offset = recv->dma_offset;
1687 /* advance to xferStatus/timeStamp */
1688 recv->dma_offset += len;
1690 total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
1691 /* payload is padded to 4 bytes */
1692 if (len % 4) {
1693 recv->dma_offset += 4 - (len%4);
1694 total_len += 4 - (len%4);
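/* Illustrative arithmetic: a 5-byte payload gives total_len = 5 + 8 = 13,
 * then 3 bytes of padding are added, so dma_offset advances 5 + 3 = 8
 * bytes past the payload start and total_len ends up at 16 (header +
 * padded payload + trailer). */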
1697 /* check for wrap-around */
1698 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1699 /* uh oh, the packet data wraps from the last
1700 to the first DMA block - make the packet
1701 contiguous by copying its "tail" into the
1702 guard page */
1704 int guard_off = recv->buf_stride*recv->nblocks;
1705 int tail_len = len - (guard_off - offset);
1707 if (tail_len > 0 && tail_len < recv->buf_stride) {
1708 memcpy(iso->data_buf.kvirt + guard_off,
1709 iso->data_buf.kvirt,
1710 tail_len);
1713 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1716 /* parse timestamp */
1717 cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
1718 cycle &= 0x1FFF;
1720 /* advance to next packet */
1721 recv->dma_offset += 4;
1723 /* check for wrap-around */
1724 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1725 recv->dma_offset -= recv->buf_stride*recv->nblocks;
1728 hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
1731 if (wake)
1732 hpsb_iso_wake(iso);
1735 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1737 int loop;
1738 struct ti_ohci *ohci = recv->ohci;
1740 /* loop over all blocks */
1741 for (loop = 0; loop < recv->nblocks; loop++) {
1743 /* check block_dma to see if it's done */
1744 struct dma_cmd *im = &recv->block[recv->block_dma];
1746 /* check the DMA descriptor for new writes to xferStatus */
1747 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1749 /* rescount is the number of bytes *remaining to be written* in the block */
1750 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1752 unsigned char event = xferstatus & 0x1F;
1754 if (!event) {
1755 /* nothing has happened to this block yet */
1756 break;
1759 if (event != 0x11) {
1760 atomic_inc(&iso->overflows);
1761 PRINT(KERN_ERR,
1762 "IR DMA error - OHCI error code 0x%02x\n", event);
1765 if (rescount != 0) {
1766 /* the card is still writing to this block;
1767 we can't touch it until it's done */
1768 break;
1771 /* OK, the block is finished... */
1773 /* sync our view of the block */
1774 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1776 /* reset the DMA descriptor */
1777 im->status = recv->buf_stride;
1779 /* advance block_dma */
1780 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1782 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1783 atomic_inc(&iso->overflows);
1784 DBGMSG("ISO reception overflow - "
1785 "ran out of DMA blocks");
1789 /* parse any packets that have arrived */
1790 ohci_iso_recv_bufferfill_parse(iso, recv);
1793 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1795 int count;
1796 int wake = 0;
1797 struct ti_ohci *ohci = recv->ohci;
1799 /* loop over the entire buffer */
1800 for (count = 0; count < recv->nblocks; count++) {
1801 u32 packet_len = 0;
1803 /* pointer to the DMA descriptor */
1804 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1806 /* check the DMA descriptor for new writes to xferStatus */
1807 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1808 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1810 unsigned char event = xferstatus & 0x1F;
1812 if (!event) {
1813 /* this packet hasn't come in yet; we are done for now */
1814 goto out;
1817 if (event == 0x11) {
1818 /* packet received successfully! */
1820 /* rescount is the number of bytes *remaining* in the packet buffer,
1821 after the packet was written */
1822 packet_len = recv->buf_stride - rescount;
1824 } else if (event == 0x02) {
1825 PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1826 } else if (event) {
1827 PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1830 /* sync our view of the buffer */
1831 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1833 /* record the per-packet info */
1835 /* iso header is 8 bytes ahead of the data payload */
1836 unsigned char *hdr;
1838 unsigned int offset;
1839 unsigned short cycle;
1840 unsigned char channel, tag, sy;
1842 offset = iso->pkt_dma * recv->buf_stride;
1843 hdr = iso->data_buf.kvirt + offset;
1845 /* skip iso header */
1846 offset += 8;
1847 packet_len -= 8;
1849 cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1850 channel = hdr[5] & 0x3F;
1851 tag = hdr[5] >> 6;
1852 sy = hdr[4] & 0xF;
1854 hpsb_iso_packet_received(iso, offset, packet_len,
1855 recv->buf_stride, cycle, channel, tag, sy);
1858 /* reset the DMA descriptor */
1859 il->status = recv->buf_stride;
1861 wake = 1;
1862 recv->block_dma = iso->pkt_dma;
1865 out:
1866 if (wake)
1867 hpsb_iso_wake(iso);
1870 static void ohci_iso_recv_task(unsigned long data)
1872 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1873 struct ohci_iso_recv *recv = iso->hostdata;
1875 if (recv->dma_mode == BUFFER_FILL_MODE)
1876 ohci_iso_recv_bufferfill_task(iso, recv);
1877 else
1878 ohci_iso_recv_packetperbuf_task(iso, recv);
1881 /***********************************
1882 * rawiso ISO transmission *
1883 ***********************************/
1885 struct ohci_iso_xmit {
1886 struct ti_ohci *ohci;
1887 struct dma_prog_region prog;
1888 struct ohci1394_iso_tasklet task;
1889 int task_active;
1891 u32 ContextControlSet;
1892 u32 ContextControlClear;
1893 u32 CommandPtr;
1896 /* transmission DMA program:
1897 one OUTPUT_MORE_IMMEDIATE for the IT header
1898 one OUTPUT_LAST for the buffer data */
1900 struct iso_xmit_cmd {
1901 struct dma_cmd output_more_immediate;
1902 u8 iso_hdr[8];
1903 u32 unused[2];
1904 struct dma_cmd output_last;
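/* Each buffered packet thus gets one of these blocks: the
 * OUTPUT_MORE_IMMEDIATE descriptor with the 8-byte IT header held
 * inline (padded out to a full component by 'unused'), followed by the
 * OUTPUT_LAST that points at the payload in the user's DMA buffer,
 * i.e. (assuming the usual 16-byte struct dma_cmd) three 16-byte
 * descriptor components in total. */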
1907 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1908 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1909 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1910 static void ohci_iso_xmit_task(unsigned long data);
1912 static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1914 struct ohci_iso_xmit *xmit;
1915 unsigned int prog_size;
1916 int ctx;
1917 int ret = -ENOMEM;
1919 xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
1920 if (!xmit)
1921 return -ENOMEM;
1923 iso->hostdata = xmit;
1924 xmit->ohci = iso->host->hostdata;
1925 xmit->task_active = 0;
1927 dma_prog_region_init(&xmit->prog);
1929 prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1931 if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1932 goto err;
1934 ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1935 ohci_iso_xmit_task, (unsigned long) iso);
1937 if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
1938 ret = -EBUSY;
1939 goto err;
1942 xmit->task_active = 1;
1944 /* xmit context registers are spaced 16 bytes apart */
1945 ctx = xmit->task.context;
1946 xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1947 xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1948 xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1950 return 0;
1952 err:
1953 ohci_iso_xmit_shutdown(iso);
1954 return ret;
1957 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1959 struct ohci_iso_xmit *xmit = iso->hostdata;
1960 struct ti_ohci *ohci = xmit->ohci;
1962 /* disable interrupts */
1963 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1965 /* halt DMA */
1966 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1967 /* XXX the DMA context will lock up if you try to send too much data! */
1968 PRINT(KERN_ERR,
1969 "you probably exceeded the OHCI card's bandwidth limit - "
1970 "reload the module and reduce xmit bandwidth");
1974 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1976 struct ohci_iso_xmit *xmit = iso->hostdata;
1978 if (xmit->task_active) {
1979 ohci_iso_xmit_stop(iso);
1980 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1981 xmit->task_active = 0;
1984 dma_prog_region_free(&xmit->prog);
1985 kfree(xmit);
1986 iso->hostdata = NULL;
1989 static void ohci_iso_xmit_task(unsigned long data)
1991 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1992 struct ohci_iso_xmit *xmit = iso->hostdata;
1993 struct ti_ohci *ohci = xmit->ohci;
1994 int wake = 0;
1995 int count;
1997 /* check the whole buffer if necessary, starting at pkt_dma */
1998 for (count = 0; count < iso->buf_packets; count++) {
1999 int cycle;
2001 /* DMA descriptor */
2002 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
2004 /* check for new writes to xferStatus */
2005 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
2006 u8 event = xferstatus & 0x1F;
2008 if (!event) {
2009 /* packet hasn't been sent yet; we are done for now */
2010 break;
2013 if (event != 0x11)
2014 PRINT(KERN_ERR,
2015 "IT DMA error - OHCI error code 0x%02x\n", event);
2017 /* at least one packet went out, so wake up the writer */
2018 wake = 1;
2020 /* parse cycle */
2021 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
2023 /* tell the subsystem the packet has gone out */
2024 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
2026 /* reset the DMA descriptor for next time */
2027 cmd->output_last.status = 0;
2030 if (wake)
2031 hpsb_iso_wake(iso);
2034 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
2036 struct ohci_iso_xmit *xmit = iso->hostdata;
2037 struct ti_ohci *ohci = xmit->ohci;
2039 int next_i, prev_i;
2040 struct iso_xmit_cmd *next, *prev;
2042 unsigned int offset;
2043 unsigned short len;
2044 unsigned char tag, sy;
2046 /* check that the packet doesn't cross a page boundary
2047 (we could allow this if we added OUTPUT_MORE descriptor support) */
2048 if (cross_bound(info->offset, info->len)) {
2049 PRINT(KERN_ERR,
2050 "rawiso xmit: packet %u crosses a page boundary",
2051 iso->first_packet);
2052 return -EINVAL;
2055 offset = info->offset;
2056 len = info->len;
2057 tag = info->tag;
2058 sy = info->sy;
2060 /* sync up the card's view of the buffer */
2061 dma_region_sync_for_device(&iso->data_buf, offset, len);
2063 /* append first_packet to the DMA chain */
2064 /* by linking the previous descriptor to it */
2065 /* (next will become the new end of the DMA chain) */
2067 next_i = iso->first_packet;
2068 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
2070 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
2071 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
2073 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
2074 memset(next, 0, sizeof(struct iso_xmit_cmd));
2075 next->output_more_immediate.control = cpu_to_le32(0x02000008);
2077 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2079 /* tcode = 0xA, and sy */
2080 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2082 /* tag and channel number */
2083 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2085 /* transmission speed */
2086 next->iso_hdr[2] = iso->speed & 0x7;
2088 /* payload size */
2089 next->iso_hdr[6] = len & 0xFF;
2090 next->iso_hdr[7] = len >> 8;
2092 /* set up the OUTPUT_LAST */
2093 next->output_last.control = cpu_to_le32(1 << 28);
2094 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2095 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2096 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2097 next->output_last.control |= cpu_to_le32(len);
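/* After the assignments above, the 8 immediate bytes read, in order:
 *   iso_hdr[0]    - tcode 0xA in the high nibble, sy in the low nibble
 *   iso_hdr[1]    - tag in bits 6-7, channel in bits 0-5
 *   iso_hdr[2]    - speed code
 *   iso_hdr[3..5] - zero (from the memset above)
 *   iso_hdr[6..7] - dataLength, little-endian
 * and the OUTPUT_LAST control word carries: 1 << 28 selecting the
 * OUTPUT_LAST command, bit 27 for the timeStamp update, i = 3
 * (interrupt) in bits 20-21, b = 3 (branch) in bits 18-19, and the
 * payload length in the low 16 bits. */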
2099 /* payload bus address */
2100 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2102 /* leave branchAddress at zero for now */
2104 /* re-write the previous DMA descriptor to chain to this one */
2106 /* set prev branch address to point to next (Z=3) */
2107 prev->output_last.branchAddress = cpu_to_le32(
2108 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
2110 /* disable interrupt, unless required by the IRQ interval */
2111 if (prev_i % iso->irq_interval) {
2112 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2113 } else {
2114 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
2117 wmb();
2119 /* wake DMA in case it is sleeping */
2120 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2122 /* issue a dummy read of the cycle timer to force all PCI
2123 writes to be posted immediately */
2124 mb();
2125 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
2127 return 0;
2130 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2132 struct ohci_iso_xmit *xmit = iso->hostdata;
2133 struct ti_ohci *ohci = xmit->ohci;
2135 /* clear out the control register */
2136 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2137 wmb();
2139 /* address and length of first descriptor block (Z=3) */
2140 reg_write(xmit->ohci, xmit->CommandPtr,
2141 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2143 /* cycle match */
2144 if (cycle != -1) {
2145 u32 start = cycle & 0x1FFF;
2147 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2148 just snarf them from the current time */
2149 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2151 /* advance one second to give some extra time for DMA to start */
2152 seconds += 1;
2154 start |= (seconds & 3) << 13;
2156 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
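/* Worked example: starting at cycle 7900 while the cycle timer reads 5
 * in its seconds field gives seconds = 6 after the adjustment above, so
 * start = ((6 & 3) << 13) | 7900 = 0x5EDC, and the register is written
 * with 0x80000000 | 0x5EDC0000 = 0xDEDC0000 (bit 31 enables cycle
 * matching, bits 16-30 hold the two seconds bits and 13 cycle bits). */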
2159 /* enable interrupts */
2160 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2162 /* run */
2163 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2164 mb();
2166 /* wait 100 usec to give the card time to go active */
2167 udelay(100);
2169 /* check the RUN bit */
2170 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2171 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2172 reg_read(xmit->ohci, xmit->ContextControlSet));
2173 return -1;
2176 return 0;
2179 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2182 switch(cmd) {
2183 case XMIT_INIT:
2184 return ohci_iso_xmit_init(iso);
2185 case XMIT_START:
2186 return ohci_iso_xmit_start(iso, arg);
2187 case XMIT_STOP:
2188 ohci_iso_xmit_stop(iso);
2189 return 0;
2190 case XMIT_QUEUE:
2191 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2192 case XMIT_SHUTDOWN:
2193 ohci_iso_xmit_shutdown(iso);
2194 return 0;
2196 case RECV_INIT:
2197 return ohci_iso_recv_init(iso);
2198 case RECV_START: {
2199 int *args = (int*) arg;
2200 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2202 case RECV_STOP:
2203 ohci_iso_recv_stop(iso);
2204 return 0;
2205 case RECV_RELEASE:
2206 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2207 return 0;
2208 case RECV_FLUSH:
2209 ohci_iso_recv_task((unsigned long) iso);
2210 return 0;
2211 case RECV_SHUTDOWN:
2212 ohci_iso_recv_shutdown(iso);
2213 return 0;
2214 case RECV_LISTEN_CHANNEL:
2215 ohci_iso_recv_change_channel(iso, arg, 1);
2216 return 0;
2217 case RECV_UNLISTEN_CHANNEL:
2218 ohci_iso_recv_change_channel(iso, arg, 0);
2219 return 0;
2220 case RECV_SET_CHANNEL_MASK:
2221 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2222 return 0;
2224 default:
2225 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2226 cmd);
2227 break;
2229 return -EINVAL;
2232 /***************************************
2233 * IEEE-1394 functionality section END *
2234 ***************************************/
2237 /********************************************************
2238 * Global stuff (interrupt handler, init/shutdown code) *
2239 ********************************************************/
2241 static void dma_trm_reset(struct dma_trm_ctx *d)
2243 unsigned long flags;
2244 LIST_HEAD(packet_list);
2245 struct ti_ohci *ohci = d->ohci;
2246 struct hpsb_packet *packet, *ptmp;
2248 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2250 /* Lock the context, reset it and release it. Move the packets
2251 * that were pending in the context to packet_list and free
2252 * them after releasing the lock. */
2254 spin_lock_irqsave(&d->lock, flags);
2256 list_splice(&d->fifo_list, &packet_list);
2257 list_splice(&d->pending_list, &packet_list);
2258 INIT_LIST_HEAD(&d->fifo_list);
2259 INIT_LIST_HEAD(&d->pending_list);
2261 d->branchAddrPtr = NULL;
2262 d->sent_ind = d->prg_ind;
2263 d->free_prgs = d->num_desc;
2265 spin_unlock_irqrestore(&d->lock, flags);
2267 if (list_empty(&packet_list))
2268 return;
2270 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2272 /* Now process subsystem callbacks for the packets from this
2273 * context. */
2274 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2275 list_del_init(&packet->driver_list);
2276 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2280 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2281 quadlet_t rx_event,
2282 quadlet_t tx_event)
2284 struct ohci1394_iso_tasklet *t;
2285 unsigned long mask;
2286 unsigned long flags;
2288 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2290 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2291 mask = 1 << t->context;
2293 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2294 tasklet_schedule(&t->tasklet);
2295 else if (rx_event & mask)
2296 tasklet_schedule(&t->tasklet);
2299 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2302 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2303 struct pt_regs *regs_are_unused)
2305 quadlet_t event, node_id;
2306 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2307 struct hpsb_host *host = ohci->host;
2308 int phyid = -1, isroot = 0;
2309 unsigned long flags;
2311 /* Read and clear the interrupt event register. Don't clear
2312 * the busReset event, though. This is done when we get the
2313 * selfIDComplete interrupt. */
2314 spin_lock_irqsave(&ohci->event_lock, flags);
2315 event = reg_read(ohci, OHCI1394_IntEventClear);
2316 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2317 spin_unlock_irqrestore(&ohci->event_lock, flags);
2319 if (!event)
2320 return IRQ_NONE;
2322 /* If event is ~(u32)0, the CardBus card was ejected. In this
2323 * case we just return and clean up in the ohci1394_pci_remove
2324 * function. */
2325 if (event == ~(u32) 0) {
2326 DBGMSG("Device removed.");
2327 return IRQ_NONE;
2330 DBGMSG("IntEvent: %08x", event);
2332 if (event & OHCI1394_unrecoverableError) {
2333 int ctx;
2334 PRINT(KERN_ERR, "Unrecoverable error!");
2336 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2337 PRINT(KERN_ERR, "Async Req Tx Context died: "
2338 "ctrl[%08x] cmdptr[%08x]",
2339 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2340 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2342 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2343 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2344 "ctrl[%08x] cmdptr[%08x]",
2345 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2346 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2348 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2349 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2350 "ctrl[%08x] cmdptr[%08x]",
2351 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2352 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2354 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2355 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2356 "ctrl[%08x] cmdptr[%08x]",
2357 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2358 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2360 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2361 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2362 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2363 "ctrl[%08x] cmdptr[%08x]", ctx,
2364 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2365 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2368 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2369 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2370 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2371 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2372 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2373 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2374 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2377 event &= ~OHCI1394_unrecoverableError;
2380 if (event & OHCI1394_cycleInconsistent) {
2381 /* We subscribe to the cycleInconsistent event only to
2382 * clear the corresponding event bit... otherwise,
2383 * isochronous cycleMatch DMA won't work. */
2384 DBGMSG("OHCI1394_cycleInconsistent");
2385 event &= ~OHCI1394_cycleInconsistent;
2388 if (event & OHCI1394_busReset) {
2389 /* The busReset event bit can't be cleared during the
2390 * selfID phase, so we disable busReset interrupts, to
2391 * avoid burying the cpu in interrupt requests. */
2392 spin_lock_irqsave(&ohci->event_lock, flags);
2393 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2395 if (ohci->check_busreset) {
2396 int loop_count = 0;
2398 udelay(10);
2400 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2401 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2403 spin_unlock_irqrestore(&ohci->event_lock, flags);
2404 udelay(10);
2405 spin_lock_irqsave(&ohci->event_lock, flags);
2407 /* The loop counter check prevents the driver
2408 * from remaining in this state forever. On the
2409 * initial bus reset the loop would otherwise spin
2410 * forever and hang the system until a device is
2411 * manually plugged into or removed from a port. The
2412 * forced reset works around this; it mainly affects nForce2. */
2413 if (loop_count > 10000) {
2414 ohci_devctl(host, RESET_BUS, LONG_RESET);
2415 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2416 loop_count = 0;
2419 loop_count++;
2422 spin_unlock_irqrestore(&ohci->event_lock, flags);
2423 if (!host->in_bus_reset) {
2424 DBGMSG("irq_handler: Bus reset requested");
2426 /* Subsystem call */
2427 hpsb_bus_reset(ohci->host);
2429 event &= ~OHCI1394_busReset;
2432 if (event & OHCI1394_reqTxComplete) {
2433 struct dma_trm_ctx *d = &ohci->at_req_context;
2434 DBGMSG("Got reqTxComplete interrupt "
2435 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2436 if (reg_read(ohci, d->ctrlSet) & 0x800)
2437 ohci1394_stop_context(ohci, d->ctrlClear,
2438 "reqTxComplete");
2439 else
2440 dma_trm_tasklet((unsigned long)d);
2441 //tasklet_schedule(&d->task);
2442 event &= ~OHCI1394_reqTxComplete;
2444 if (event & OHCI1394_respTxComplete) {
2445 struct dma_trm_ctx *d = &ohci->at_resp_context;
2446 DBGMSG("Got respTxComplete interrupt "
2447 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2448 if (reg_read(ohci, d->ctrlSet) & 0x800)
2449 ohci1394_stop_context(ohci, d->ctrlClear,
2450 "respTxComplete");
2451 else
2452 tasklet_schedule(&d->task);
2453 event &= ~OHCI1394_respTxComplete;
2455 if (event & OHCI1394_RQPkt) {
2456 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2457 DBGMSG("Got RQPkt interrupt status=0x%08X",
2458 reg_read(ohci, d->ctrlSet));
2459 if (reg_read(ohci, d->ctrlSet) & 0x800)
2460 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2461 else
2462 tasklet_schedule(&d->task);
2463 event &= ~OHCI1394_RQPkt;
2465 if (event & OHCI1394_RSPkt) {
2466 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2467 DBGMSG("Got RSPkt interrupt status=0x%08X",
2468 reg_read(ohci, d->ctrlSet));
2469 if (reg_read(ohci, d->ctrlSet) & 0x800)
2470 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2471 else
2472 tasklet_schedule(&d->task);
2473 event &= ~OHCI1394_RSPkt;
2475 if (event & OHCI1394_isochRx) {
2476 quadlet_t rx_event;
2478 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2479 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2480 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2481 event &= ~OHCI1394_isochRx;
2483 if (event & OHCI1394_isochTx) {
2484 quadlet_t tx_event;
2486 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2487 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2488 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2489 event &= ~OHCI1394_isochTx;
2491 if (event & OHCI1394_selfIDComplete) {
2492 if (host->in_bus_reset) {
2493 node_id = reg_read(ohci, OHCI1394_NodeID);
2495 if (!(node_id & 0x80000000)) {
2496 PRINT(KERN_ERR,
2497 "SelfID received, but NodeID invalid "
2498 "(probably new bus reset occurred): %08X",
2499 node_id);
2500 goto selfid_not_valid;
2503 phyid = node_id & 0x0000003f;
2504 isroot = (node_id & 0x40000000) != 0;
2506 DBGMSG("SelfID interrupt received "
2507 "(phyid %d, %s)", phyid,
2508 (isroot ? "root" : "not root"));
2510 handle_selfid(ohci, host, phyid, isroot);
2512 /* Clear the bus reset event and re-enable the
2513 * busReset interrupt. */
2514 spin_lock_irqsave(&ohci->event_lock, flags);
2515 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2516 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2517 spin_unlock_irqrestore(&ohci->event_lock, flags);
2519 /* Accept Physical requests from all nodes. */
2520 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
2521 reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
2523 /* Turn on phys dma reception.
2525 * TODO: Enable some sort of filtering management.
2526 */
2527 if (phys_dma) {
2528 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
2529 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
2530 reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
2531 } else {
2532 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
2533 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
2536 DBGMSG("PhyReqFilter=%08x%08x",
2537 reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
2538 reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
2540 hpsb_selfid_complete(host, phyid, isroot);
2541 } else
2542 PRINT(KERN_ERR,
2543 "SelfID received outside of bus reset sequence");
2545 selfid_not_valid:
2546 event &= ~OHCI1394_selfIDComplete;
2549 /* Make sure we handle everything, just in case we accidentally
2550 * enabled an interrupt that we didn't write a handler for. */
2551 if (event)
2552 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2553 event);
2555 return IRQ_HANDLED;
2558 /* Put the buffer back into the dma context */
2559 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2561 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2562 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2564 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2565 d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
2566 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2567 d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
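/* What the three writes above accomplish: resetting status to buf_size
 * marks this descriptor's buffer as empty again (the low half of status
 * holds the residual byte count), clearing the low nibble of its
 * branchAddress makes it the new end of the chain, and setting Z = 1 in
 * the previous descriptor's branchAddress links the chain back into it. */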
2569 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2570 * context program descriptors before it sees the wakeup bit set. */
2571 wmb();
2573 /* wake up the dma context if necessary */
2574 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2575 PRINT(KERN_INFO,
2576 "Waking dma ctx=%d ... processing is probably too slow",
2577 d->ctx);
2580 /* do this always, to avoid race condition */
2581 reg_write(ohci, d->ctrlSet, 0x1000);
2584 #define cond_le32_to_cpu(data, noswap) \
2585 (noswap ? data : le32_to_cpu(data))
2587 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2588 -1, 0, -1, 0, -1, -1, 16, -1};
2590 /*
2591 * Determine the length of a packet in the buffer
2592 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2593 */
2594 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2595 int offset, unsigned char tcode, int noswap)
2597 int length = -1;
2599 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2600 length = TCODE_SIZE[tcode];
2601 if (length == 0) {
2602 if (offset + 12 >= d->buf_size) {
2603 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2604 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2605 } else {
2606 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2608 length += 20;
2610 } else if (d->type == DMA_CTX_ISO) {
2611 /* Assumption: buffer fill mode with header/trailer */
2612 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2615 if (length > 0 && length % 4)
2616 length += 4 - (length % 4);
2618 return length;
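/* Example of the computation above for an async context: a quadlet
 * write request (tcode 0) is a fixed 20 bytes - 16 bytes of 1394 header
 * plus the 4-byte status trailer the AR context appends (the driver
 * reads the ack from that trailer and hands hpsb_packet_received()
 * length-4 below). For tcodes whose TCODE_SIZE entry is 0 (block
 * payloads), data_length is taken from the upper half of the fourth
 * quadlet, the same 20 bytes are added, and the result is rounded up to
 * a multiple of four. */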
2621 /* Tasklet that processes dma receive buffers */
2622 static void dma_rcv_tasklet (unsigned long data)
2624 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2625 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2626 unsigned int split_left, idx, offset, rescount;
2627 unsigned char tcode;
2628 int length, bytes_left, ack;
2629 unsigned long flags;
2630 quadlet_t *buf_ptr;
2631 char *split_ptr;
2632 char msg[256];
2634 spin_lock_irqsave(&d->lock, flags);
2636 idx = d->buf_ind;
2637 offset = d->buf_offset;
2638 buf_ptr = d->buf_cpu[idx] + offset/4;
2640 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2641 bytes_left = d->buf_size - rescount - offset;
2643 while (bytes_left > 0) {
2644 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2646 /* packet_length() will return < 4 for an error */
2647 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2649 if (length < 4) { /* something is wrong */
2650 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2651 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2652 d->ctx, length);
2653 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2654 spin_unlock_irqrestore(&d->lock, flags);
2655 return;
2658 /* The first case is where we have a packet that crosses
2659 * over more than one descriptor. The next case is where
2660 * it's all in the first descriptor. */
2661 if ((offset + length) > d->buf_size) {
2662 DBGMSG("Split packet rcv'd");
2663 if (length > d->split_buf_size) {
2664 ohci1394_stop_context(ohci, d->ctrlClear,
2665 "Split packet size exceeded");
2666 d->buf_ind = idx;
2667 d->buf_offset = offset;
2668 spin_unlock_irqrestore(&d->lock, flags);
2669 return;
2672 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2673 == d->buf_size) {
2674 /* The rest of the packet has not been written yet.
2675 * This should never happen; if it does, we will simply
2676 * pick it up on the next call. */
2677 PRINT(KERN_INFO,
2678 "Got only half a packet!");
2679 d->buf_ind = idx;
2680 d->buf_offset = offset;
2681 spin_unlock_irqrestore(&d->lock, flags);
2682 return;
2685 split_left = length;
2686 split_ptr = (char *)d->spb;
2687 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2688 split_left -= d->buf_size-offset;
2689 split_ptr += d->buf_size-offset;
2690 insert_dma_buffer(d, idx);
2691 idx = (idx+1) % d->num_desc;
2692 buf_ptr = d->buf_cpu[idx];
2693 offset=0;
2695 while (split_left >= d->buf_size) {
2696 memcpy(split_ptr,buf_ptr,d->buf_size);
2697 split_ptr += d->buf_size;
2698 split_left -= d->buf_size;
2699 insert_dma_buffer(d, idx);
2700 idx = (idx+1) % d->num_desc;
2701 buf_ptr = d->buf_cpu[idx];
2704 if (split_left > 0) {
2705 memcpy(split_ptr, buf_ptr, split_left);
2706 offset = split_left;
2707 buf_ptr += offset/4;
2709 } else {
2710 DBGMSG("Single packet rcv'd");
2711 memcpy(d->spb, buf_ptr, length);
2712 offset += length;
2713 buf_ptr += length/4;
2714 if (offset==d->buf_size) {
2715 insert_dma_buffer(d, idx);
2716 idx = (idx+1) % d->num_desc;
2717 buf_ptr = d->buf_cpu[idx];
2718 offset=0;
2722 /* We get one phy packet in the async receive buffer for each
2723 * bus reset. We always ignore it. */
2724 if (tcode != OHCI1394_TCODE_PHY) {
2725 if (!ohci->no_swap_incoming)
2726 packet_swab(d->spb, tcode);
2727 DBGMSG("Packet received from node"
2728 " %d ack=0x%02X spd=%d tcode=0x%X"
2729 " length=%d ctx=%d tlabel=%d",
2730 (d->spb[1]>>16)&0x3f,
2731 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2732 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2733 tcode, length, d->ctx,
2734 (cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);
2736 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2737 == 0x11) ? 1 : 0;
2739 hpsb_packet_received(ohci->host, d->spb,
2740 length-4, ack);
2742 #ifdef OHCI1394_DEBUG
2743 else
2744 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2745 d->ctx);
2746 #endif
2748 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2750 bytes_left = d->buf_size - rescount - offset;
2754 d->buf_ind = idx;
2755 d->buf_offset = offset;
2757 spin_unlock_irqrestore(&d->lock, flags);
2760 /* Bottom half that processes sent packets */
2761 static void dma_trm_tasklet (unsigned long data)
2763 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2764 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2765 struct hpsb_packet *packet, *ptmp;
2766 unsigned long flags;
2767 u32 status, ack;
2768 size_t datasize;
2770 spin_lock_irqsave(&d->lock, flags);
2772 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2773 datasize = packet->data_size;
2774 if (datasize && packet->type != hpsb_raw)
2775 status = le32_to_cpu(
2776 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2777 else
2778 status = le32_to_cpu(
2779 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2781 if (status == 0)
2782 /* this packet hasn't been sent yet*/
2783 break;
2785 #ifdef OHCI1394_DEBUG
2786 if (datasize)
2787 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2788 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2789 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2790 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2791 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2792 status&0x1f, (status>>5)&0x3,
2793 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2794 d->ctx);
2795 else
2796 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2797 "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2798 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2799 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2800 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2801 status&0x1f, (status>>5)&0x3,
2802 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2803 d->ctx);
2804 else
2805 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2806 "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2807 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2808 >>16)&0x3f,
2809 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2810 >>4)&0xf,
2811 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2812 >>10)&0x3f,
2813 status&0x1f, (status>>5)&0x3,
2814 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2815 d->ctx);
2816 #endif
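/* OHCI folds received ack codes into the xferStatus event field: when
 * bit 4 of the event code is set, the low four bits are the IEEE 1394
 * ack code itself; otherwise the code is one of the evt_* errors, which
 * the switch below maps onto an ACKX_* value for the subsystem. */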
2818 if (status & 0x10) {
2819 ack = status & 0xf;
2820 } else {
2821 switch (status & 0x1f) {
2822 case EVT_NO_STATUS: /* that should never happen */
2823 case EVT_RESERVED_A: /* that should never happen */
2824 case EVT_LONG_PACKET: /* that should never happen */
2825 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2826 ack = ACKX_SEND_ERROR;
2827 break;
2828 case EVT_MISSING_ACK:
2829 ack = ACKX_TIMEOUT;
2830 break;
2831 case EVT_UNDERRUN:
2832 ack = ACKX_SEND_ERROR;
2833 break;
2834 case EVT_OVERRUN: /* that should never happen */
2835 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2836 ack = ACKX_SEND_ERROR;
2837 break;
2838 case EVT_DESCRIPTOR_READ:
2839 case EVT_DATA_READ:
2840 case EVT_DATA_WRITE:
2841 ack = ACKX_SEND_ERROR;
2842 break;
2843 case EVT_BUS_RESET: /* that should never happen */
2844 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2845 ack = ACKX_SEND_ERROR;
2846 break;
2847 case EVT_TIMEOUT:
2848 ack = ACKX_TIMEOUT;
2849 break;
2850 case EVT_TCODE_ERR:
2851 ack = ACKX_SEND_ERROR;
2852 break;
2853 case EVT_RESERVED_B: /* that should never happen */
2854 case EVT_RESERVED_C: /* that should never happen */
2855 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2856 ack = ACKX_SEND_ERROR;
2857 break;
2858 case EVT_UNKNOWN:
2859 case EVT_FLUSHED:
2860 ack = ACKX_SEND_ERROR;
2861 break;
2862 default:
2863 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2864 ack = ACKX_SEND_ERROR;
2865 BUG();
2869 list_del_init(&packet->driver_list);
2870 hpsb_packet_sent(ohci->host, packet, ack);
2872 if (datasize) {
2873 pci_unmap_single(ohci->dev,
2874 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2875 datasize, PCI_DMA_TODEVICE);
2876 OHCI_DMA_FREE("single Xmit data packet");
2879 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2880 d->free_prgs++;
2883 dma_trm_flush(ohci, d);
2885 spin_unlock_irqrestore(&d->lock, flags);
2888 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2890 if (d->ctrlClear) {
2891 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2893 if (d->type == DMA_CTX_ISO) {
2894 /* disable interrupts */
2895 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2896 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2897 } else {
2898 tasklet_kill(&d->task);
2904 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2906 int i;
2907 struct ti_ohci *ohci = d->ohci;
2909 if (ohci == NULL)
2910 return;
2912 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2914 if (d->buf_cpu) {
2915 for (i=0; i<d->num_desc; i++)
2916 if (d->buf_cpu[i] && d->buf_bus[i]) {
2917 pci_free_consistent(
2918 ohci->dev, d->buf_size,
2919 d->buf_cpu[i], d->buf_bus[i]);
2920 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2922 kfree(d->buf_cpu);
2923 kfree(d->buf_bus);
2925 if (d->prg_cpu) {
2926 for (i=0; i<d->num_desc; i++)
2927 if (d->prg_cpu[i] && d->prg_bus[i]) {
2928 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2929 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2931 pci_pool_destroy(d->prg_pool);
2932 OHCI_DMA_FREE("dma_rcv prg pool");
2933 kfree(d->prg_cpu);
2934 kfree(d->prg_bus);
2936 kfree(d->spb);
2938 /* Mark this context as freed. */
2939 d->ohci = NULL;
2942 static int
2943 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2944 enum context_type type, int ctx, int num_desc,
2945 int buf_size, int split_buf_size, int context_base)
2947 int i, len;
2948 static int num_allocs;
2949 static char pool_name[20];
2951 d->ohci = ohci;
2952 d->type = type;
2953 d->ctx = ctx;
2955 d->num_desc = num_desc;
2956 d->buf_size = buf_size;
2957 d->split_buf_size = split_buf_size;
2959 d->ctrlSet = 0;
2960 d->ctrlClear = 0;
2961 d->cmdPtr = 0;
2963 d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC);
2964 d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2966 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2967 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2968 free_dma_rcv_ctx(d);
2969 return -ENOMEM;
2971 memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2972 memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2974 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2975 GFP_ATOMIC);
2976 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2978 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2979 PRINT(KERN_ERR, "Failed to allocate dma prg");
2980 free_dma_rcv_ctx(d);
2981 return -ENOMEM;
2983 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2984 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2986 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2988 if (d->spb == NULL) {
2989 PRINT(KERN_ERR, "Failed to allocate split buffer");
2990 free_dma_rcv_ctx(d);
2991 return -ENOMEM;
2994 len = sprintf(pool_name, "ohci1394_rcv_prg");
2995 sprintf(pool_name+len, "%d", num_allocs);
2996 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2997 sizeof(struct dma_cmd), 4, 0);
2998 if(d->prg_pool == NULL)
3000 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3001 free_dma_rcv_ctx(d);
3002 return -ENOMEM;
3004 num_allocs++;
3006 OHCI_DMA_ALLOC("dma_rcv prg pool");
3008 for (i=0; i<d->num_desc; i++) {
3009 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3010 d->buf_size,
3011 d->buf_bus+i);
3012 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3014 if (d->buf_cpu[i] != NULL) {
3015 memset(d->buf_cpu[i], 0, d->buf_size);
3016 } else {
3017 PRINT(KERN_ERR,
3018 "Failed to allocate dma buffer");
3019 free_dma_rcv_ctx(d);
3020 return -ENOMEM;
3023 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3024 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3026 if (d->prg_cpu[i] != NULL) {
3027 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3028 } else {
3029 PRINT(KERN_ERR,
3030 "Failed to allocate dma prg");
3031 free_dma_rcv_ctx(d);
3032 return -ENOMEM;
3036 spin_lock_init(&d->lock);
3038 if (type == DMA_CTX_ISO) {
3039 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3040 OHCI_ISO_MULTICHANNEL_RECEIVE,
3041 dma_rcv_tasklet, (unsigned long) d);
3042 } else {
3043 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3044 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3045 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3047 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3050 return 0;
3053 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3055 int i;
3056 struct ti_ohci *ohci = d->ohci;
3058 if (ohci == NULL)
3059 return;
3061 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3063 if (d->prg_cpu) {
3064 for (i=0; i<d->num_desc; i++)
3065 if (d->prg_cpu[i] && d->prg_bus[i]) {
3066 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3067 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3069 pci_pool_destroy(d->prg_pool);
3070 OHCI_DMA_FREE("dma_trm prg pool");
3071 kfree(d->prg_cpu);
3072 kfree(d->prg_bus);
3075 /* Mark this context as freed. */
3076 d->ohci = NULL;
3079 static int
3080 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3081 enum context_type type, int ctx, int num_desc,
3082 int context_base)
3084 int i, len;
3085 static char pool_name[20];
3086 static int num_allocs=0;
3088 d->ohci = ohci;
3089 d->type = type;
3090 d->ctx = ctx;
3091 d->num_desc = num_desc;
3092 d->ctrlSet = 0;
3093 d->ctrlClear = 0;
3094 d->cmdPtr = 0;
3096 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3097 GFP_KERNEL);
3098 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3100 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3101 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3102 free_dma_trm_ctx(d);
3103 return -ENOMEM;
3105 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3106 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3108 len = sprintf(pool_name, "ohci1394_trm_prg");
3109 sprintf(pool_name+len, "%d", num_allocs);
3110 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3111 sizeof(struct at_dma_prg), 4, 0);
3112 if (d->prg_pool == NULL) {
3113 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3114 free_dma_trm_ctx(d);
3115 return -ENOMEM;
3117 num_allocs++;
3119 OHCI_DMA_ALLOC("dma_rcv prg pool");
3121 for (i = 0; i < d->num_desc; i++) {
3122 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3123 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3125 if (d->prg_cpu[i] != NULL) {
3126 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3127 } else {
3128 PRINT(KERN_ERR,
3129 "Failed to allocate at dma prg");
3130 free_dma_trm_ctx(d);
3131 return -ENOMEM;
3135 spin_lock_init(&d->lock);
3137 /* initialize tasklet */
3138 if (type == DMA_CTX_ISO) {
3139 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3140 dma_trm_tasklet, (unsigned long) d);
3141 if (ohci1394_register_iso_tasklet(ohci,
3142 &ohci->it_legacy_tasklet) < 0) {
3143 PRINT(KERN_ERR, "No IT DMA context available");
3144 free_dma_trm_ctx(d);
3145 return -EBUSY;
3148 /* IT can be assigned to any context by register_iso_tasklet */
3149 d->ctx = ohci->it_legacy_tasklet.context;
3150 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3151 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3152 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3153 } else {
3154 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3155 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3156 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3157 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3160 return 0;
3163 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3165 struct ti_ohci *ohci = host->hostdata;
3167 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3168 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3170 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
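/* ohci_hw_csr_reg() below drives the controller's compare-swap unit for
 * the serial-bus CSR resources: it loads CSRData and CSRCompareData,
 * selects the resource with the two low bits written to CSRControl,
 * polls the done bit (bit 31) for up to OHCI_LOOP_COUNT milliseconds,
 * and returns whatever CSRData holds afterwards - per OHCI that is the
 * resource's previous value, so callers can tell whether their swap
 * took effect. */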
3174 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3175 quadlet_t data, quadlet_t compare)
3177 struct ti_ohci *ohci = host->hostdata;
3178 int i;
3180 reg_write(ohci, OHCI1394_CSRData, data);
3181 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3182 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3184 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3185 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3186 break;
3188 mdelay(1);
3191 return reg_read(ohci, OHCI1394_CSRData);
3194 static struct hpsb_host_driver ohci1394_driver = {
3195 .owner = THIS_MODULE,
3196 .name = OHCI1394_DRIVER_NAME,
3197 .set_hw_config_rom = ohci_set_hw_config_rom,
3198 .transmit_packet = ohci_transmit,
3199 .devctl = ohci_devctl,
3200 .isoctl = ohci_isoctl,
3201 .hw_csr_reg = ohci_hw_csr_reg,
3206 /***********************************
3207 * PCI Driver Interface functions *
3208 ***********************************/
3210 #define FAIL(err, fmt, args...) \
3211 do { \
3212 PRINT_G(KERN_ERR, fmt , ## args); \
3213 ohci1394_pci_remove(dev); \
3214 return err; \
3215 } while (0)
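/* Note that FAIL() leans on ohci->init_state: the probe function below
 * advances init_state as each resource is acquired, and
 * ohci1394_pci_remove() only tears down what that state says was
 * actually set up, so bailing out early is safe at any point. */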
3217 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3218 const struct pci_device_id *ent)
3220 static int version_printed = 0;
3222 struct hpsb_host *host;
3223 struct ti_ohci *ohci; /* shortcut to currently handled device */
3224 unsigned long ohci_base;
3226 if (version_printed++ == 0)
3227 PRINT_G(KERN_INFO, "%s", version);
3229 if (pci_enable_device(dev))
3230 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3231 pci_set_master(dev);
3233 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3234 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3236 ohci = host->hostdata;
3237 ohci->dev = dev;
3238 ohci->host = host;
3239 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3240 host->pdev = dev;
3241 pci_set_drvdata(dev, ohci);
3243 /* We don't want hardware swapping */
3244 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3246 /* Some oddball Apple controllers do not order the selfid
3247 * properly, so we make up for it here. */
3248 #ifndef __LITTLE_ENDIAN
3249 /* XXX: Need a better way to check this. I'm wondering if we can
3250 * read the values of the OHCI1394_PCI_HCI_Control and the
3251 * noByteSwapData registers to see if they were not cleared to
3252 * zero. Should this work? Obviously it's not defined what these
3253 * registers will read when they aren't supported. Bleh! */
3254 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3255 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3256 ohci->no_swap_incoming = 1;
3257 ohci->selfid_swap = 0;
3258 } else
3259 ohci->selfid_swap = 1;
3260 #endif
3263 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3264 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3265 #endif
3267 /* These chipsets require a bit of extra care when checking after
3268 * a busreset. */
3269 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3270 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3271 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3272 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3273 ohci->check_busreset = 1;
3275 /* We hardwire the MMIO length, since some CardBus adaptors
3276 * fail to report the right length. Anyway, the ohci spec
3277 * clearly says it's 2kb, so this shouldn't be a problem. */
3278 ohci_base = pci_resource_start(dev, 0);
3279 if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
3280 PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
3281 pci_resource_len(dev, 0));
3283 /* Seems PCMCIA handles this internally. Not sure why. Seems
3284 * pretty bogus to force a driver to special case this. */
3285 #ifndef PCMCIA
3286 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
3287 FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
3288 ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
3289 #endif
3290 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3292 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3293 if (ohci->registers == NULL)
3294 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3295 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3296 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3298 /* csr_config rom allocation */
3299 ohci->csr_config_rom_cpu =
3300 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3301 &ohci->csr_config_rom_bus);
3302 OHCI_DMA_ALLOC("consistent csr_config_rom");
3303 if (ohci->csr_config_rom_cpu == NULL)
3304 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3305 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3307 /* self-id dma buffer allocation */
3308 ohci->selfid_buf_cpu =
3309 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3310 &ohci->selfid_buf_bus);
3311 OHCI_DMA_ALLOC("consistent selfid_buf");
3313 if (ohci->selfid_buf_cpu == NULL)
3314 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3315 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3317 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3318 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3319 "8Kb boundary... may cause problems on some CXD3222 chip",
3320 ohci->selfid_buf_cpu);
3322 /* No self-id errors at startup */
3323 ohci->self_id_errors = 0;
3325 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3326 /* AR DMA request context allocation */
3327 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3328 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3329 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3330 OHCI1394_AsReqRcvContextBase) < 0)
3331 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3333 /* AR DMA response context allocation */
3334 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3335 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3336 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3337 OHCI1394_AsRspRcvContextBase) < 0)
3338 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3340 /* AT DMA request context */
3341 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3342 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3343 OHCI1394_AsReqTrContextBase) < 0)
3344 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3346 /* AT DMA response context */
3347 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3348 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3349 OHCI1394_AsRspTrContextBase) < 0)
3350 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3352 /* Start off with a soft reset, to clear everything to a sane
3353 * state. */
3354 ohci_soft_reset(ohci);
3356 /* Now enable LPS, which we need in order to start accessing
3357 * most of the registers. In fact, on some cards (ALI M5251),
3358 * accessing registers in the SClk domain without LPS enabled
3359 * will lock up the machine. Wait 50msec to make sure we have
3360 * full link enabled. */
3361 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3363 /* Disable and clear interrupts */
3364 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3365 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3367 mdelay(50);
3369 /* Determine the number of available IR and IT contexts. */
3370 ohci->nb_iso_rcv_ctx =
3371 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3372 DBGMSG("%d iso receive contexts available",
3373 ohci->nb_iso_rcv_ctx);
3375 ohci->nb_iso_xmit_ctx =
3376 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3377 DBGMSG("%d iso transmit contexts available",
3378 ohci->nb_iso_xmit_ctx);
3380 /* Set the usage bits for non-existent contexts so they can't
3381 * be allocated */
3382 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3383 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
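/* For example, with 8 receive contexts ir_ctx_usage becomes ~0 << 8,
 * i.e. a mask with bits 0-7 clear and every higher bit set, so the
 * test_and_set_bit() scan in ohci1394_register_iso_tasklet() can only
 * ever claim contexts 0-7. */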
3385 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3386 spin_lock_init(&ohci->iso_tasklet_list_lock);
3387 ohci->ISO_channel_usage = 0;
3388 spin_lock_init(&ohci->IR_channel_lock);
3390 /* Allocate the IR DMA context right here so we don't have
3391 * to do it in interrupt path - note that this doesn't
3392 * waste much memory and avoids the jugglery required to
3393 * allocate it in IRQ path. */
3394 if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3395 DMA_CTX_ISO, 0, IR_NUM_DESC,
3396 IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3397 OHCI1394_IsoRcvContextBase) < 0) {
3398 FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3401 /* We hopefully don't have to pre-allocate IT DMA like we did
3402 * for IR DMA above. Allocate it on-demand and mark inactive. */
3403 ohci->it_legacy_context.ohci = NULL;
3404 spin_lock_init(&ohci->event_lock);
3406 /*
3407 * interrupts are disabled, all right, but... due to SA_SHIRQ we
3408 * might get called anyway. We'll see no event, of course, but
3409 * we need to get to that "no event", so enough should be initialized
3410 * by that point.
3411 */
3412 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
3413 OHCI1394_DRIVER_NAME, ohci))
3414 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3416 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3417 ohci_initialize(ohci);
3419 /* Set certain csr values */
3420 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3421 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3422 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3423 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3424 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3426 /* Tell the highlevel this host is ready */
3427 if (hpsb_add_host(host))
3428 FAIL(-ENOMEM, "Failed to register host with highlevel");
3430 ohci->init_state = OHCI_INIT_DONE;
3432 return 0;
3433 #undef FAIL
3436 static void ohci1394_pci_remove(struct pci_dev *pdev)
3438 struct ti_ohci *ohci;
3439 struct device *dev;
3441 ohci = pci_get_drvdata(pdev);
3442 if (!ohci)
3443 return;
3445 dev = get_device(&ohci->host->device);
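/* The cases below intentionally fall through - there are no breaks - so
 * teardown runs from the most recently completed init step all the way
 * down to the host allocation, mirroring the init_state progression in
 * ohci1394_pci_probe() in reverse. */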
3447 switch (ohci->init_state) {
3448 case OHCI_INIT_DONE:
3449 hpsb_remove_host(ohci->host);
3451 /* Clear out BUS Options */
3452 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3453 reg_write(ohci, OHCI1394_BusOptions,
3454 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3455 0x00ff0000);
3456 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3458 case OHCI_INIT_HAVE_IRQ:
3459 /* Clear interrupt registers */
3460 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3461 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3462 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3463 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3464 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3465 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3467 /* Disable IRM Contender */
3468 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3470 /* Clear link control register */
3471 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3473 /* Let all other nodes know to ignore us */
3474 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3476 /* Soft reset before we start - this disables
3477 * interrupts and clears linkEnable and LPS. */
3478 ohci_soft_reset(ohci);
3479 free_irq(ohci->dev->irq, ohci);
3481 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3482 /* The ohci_soft_reset() above already stops all DMA contexts,
3483 * so we don't need to stop them again here. */
3484 /* Free AR dma */
3485 free_dma_rcv_ctx(&ohci->ar_req_context);
3486 free_dma_rcv_ctx(&ohci->ar_resp_context);
3488 /* Free AT dma */
3489 free_dma_trm_ctx(&ohci->at_req_context);
3490 free_dma_trm_ctx(&ohci->at_resp_context);
3492 /* Free IR dma */
3493 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3495 /* Free IT dma */
3496 free_dma_trm_ctx(&ohci->it_legacy_context);
3498 /* Free IR legacy dma */
3499 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3502 case OHCI_INIT_HAVE_SELFID_BUFFER:
3503 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3504 ohci->selfid_buf_cpu,
3505 ohci->selfid_buf_bus);
3506 OHCI_DMA_FREE("consistent selfid_buf");
3508 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3509 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3510 ohci->csr_config_rom_cpu,
3511 ohci->csr_config_rom_bus);
3512 OHCI_DMA_FREE("consistent csr_config_rom");
3514 case OHCI_INIT_HAVE_IOMAPPING:
3515 iounmap(ohci->registers);
3517 case OHCI_INIT_HAVE_MEM_REGION:
3518 #ifndef PCMCIA
3519 release_mem_region(pci_resource_start(ohci->dev, 0),
3520 OHCI1394_REGISTER_SIZE);
3521 #endif
3523 #ifdef CONFIG_PPC_PMAC
3524 /* On UniNorth, power down the cable and turn off the chip
3525 * clock when the module is removed to save power on
3526 * laptops. Turning it back ON is done by the arch code when
3527 * pci_enable_device() is called */
3529 struct device_node* of_node;
3531 of_node = pci_device_to_OF_node(ohci->dev);
3532 if (of_node) {
3533 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3534 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3537 #endif /* CONFIG_PPC_PMAC */
3539 case OHCI_INIT_ALLOC_HOST:
3540 pci_set_drvdata(ohci->dev, NULL);
3543 if (dev)
3544 put_device(dev);
3548 static int ohci1394_pci_resume (struct pci_dev *pdev)
3550 #ifdef CONFIG_PPC_PMAC
3551 if (_machine == _MACH_Pmac) {
3552 struct device_node *of_node;
3554 /* Re-enable 1394 */
3555 of_node = pci_device_to_OF_node (pdev);
3556 if (of_node)
3557 pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
3559 #endif /* CONFIG_PPC_PMAC */
3561 pci_enable_device(pdev);
3563 return 0;
3567 static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3569 #ifdef CONFIG_PPC_PMAC
3570 if (_machine == _MACH_Pmac) {
3571 struct device_node *of_node;
3573 /* Disable 1394 */
3574 of_node = pci_device_to_OF_node (pdev);
3575 if (of_node)
3576 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3578 #endif
3580 return 0;
3584 #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
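/* PCI_CLASS_SERIAL_FIREWIRE is 0x0c00, so this expands to the 24-bit
 * class code 0x0c0010: base class 0x0c (serial bus), subclass 0x00
 * (IEEE 1394), programming interface 0x10 (OHCI). The wildcard entry
 * below then matches every OHCI-compliant FireWire controller. */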
3586 static struct pci_device_id ohci1394_pci_tbl[] = {
3588 .class = PCI_CLASS_FIREWIRE_OHCI,
3589 .class_mask = PCI_ANY_ID,
3590 .vendor = PCI_ANY_ID,
3591 .device = PCI_ANY_ID,
3592 .subvendor = PCI_ANY_ID,
3593 .subdevice = PCI_ANY_ID,
3595 { 0, },
3598 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3600 static struct pci_driver ohci1394_pci_driver = {
3601 .name = OHCI1394_DRIVER_NAME,
3602 .id_table = ohci1394_pci_tbl,
3603 .probe = ohci1394_pci_probe,
3604 .remove = ohci1394_pci_remove,
3605 .resume = ohci1394_pci_resume,
3606 .suspend = ohci1394_pci_suspend,
3611 /***********************************
3612 * OHCI1394 Video Interface *
3613 ***********************************/
3615 /* essentially the only purpose of this code is to allow another
3616 module to hook into ohci's interrupt handler */
3618 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3620 int i=0;
3622 /* stop the channel program if it's still running */
3623 reg_write(ohci, reg, 0x8000);
3625 /* Wait until it effectively stops */
3626 while (reg_read(ohci, reg) & 0x400) {
3627 i++;
3628 if (i>5000) {
3629 PRINT(KERN_ERR,
3630 "Runaway loop while stopping context: %s...", msg ? msg : "");
3631 return 1;
3634 mb();
3635 udelay(10);
3637 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3638 return 0;
3641 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3642 void (*func)(unsigned long), unsigned long data)
3644 tasklet_init(&tasklet->tasklet, func, data);
3645 tasklet->type = type;
3646 /* We init the tasklet->link field, so we can list_del() it
3647 * without worrying whether it was added to the list or not. */
3648 INIT_LIST_HEAD(&tasklet->link);
3651 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3652 struct ohci1394_iso_tasklet *tasklet)
3654 unsigned long flags, *usage;
3655 int n, i, r = -EBUSY;
3657 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3658 n = ohci->nb_iso_xmit_ctx;
3659 usage = &ohci->it_ctx_usage;
3661 else {
3662 n = ohci->nb_iso_rcv_ctx;
3663 usage = &ohci->ir_ctx_usage;
3665 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3666 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3667 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3668 return r;
3673 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3675 for (i = 0; i < n; i++)
3676 if (!test_and_set_bit(i, usage)) {
3677 tasklet->context = i;
3678 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3679 r = 0;
3680 break;
3683 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3685 return r;
3688 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3689 struct ohci1394_iso_tasklet *tasklet)
3691 unsigned long flags;
3693 tasklet_kill(&tasklet->tasklet);
3695 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3697 if (tasklet->type == OHCI_ISO_TRANSMIT)
3698 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3699 else {
3700 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3702 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3703 clear_bit(0, &ohci->ir_multichannel_used);
3707 list_del(&tasklet->link);
3709 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3712 EXPORT_SYMBOL(ohci1394_stop_context);
3713 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3714 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3715 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3718 /***********************************
3719 * General module initialization *
3720 ***********************************/
3722 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3723 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3724 MODULE_LICENSE("GPL");
3726 static void __exit ohci1394_cleanup (void)
3728 pci_unregister_driver(&ohci1394_pci_driver);
3731 static int __init ohci1394_init(void)
3733 return pci_register_driver(&ohci1394_pci_driver);
3736 module_init(ohci1394_init);
3737 module_exit(ohci1394_cleanup);