/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C) 1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                         Gord Peters <GordPeters@smarttech.com>
 *               2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . Iso Receive
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Iso Transmit
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 */

/*
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionalities
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *  . Bug fixes
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "dma.h"
#include "iso.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

#ifdef DBGMSG
#undef DBGMSG
#endif

#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...) do {} while (0)
#endif

#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...) do {} while (0)
#define OHCI_DMA_FREE(fmt, args...) do {} while (0)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0444);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");

static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);
#ifndef __LITTLE_ENDIAN
static const size_t hdr_sizes[] = {
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* reserved */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
		/* rest is reserved or link-internal */
};

static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
{
	size_t size;

	if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
		return;

	size = hdr_sizes[tcode];
	while (size--)
		data[size] = le32_to_cpu(data[size]);
}
#else
#define header_le32_to_cpu(w,x) do {} while (0)
#endif /* !LITTLE_ENDIAN */
/***********************************
 * IEEE-1394 functionality section *
 ***********************************/
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}

static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return;
}

/* Or's our value into the current value */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
	u8 old;

	old = get_phy_reg (ohci, addr);
	old |= data;
	set_phy_reg (ohci, addr, old);

	return;
}
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response.  */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
static void ohci_soft_reset(struct ti_ohci *ohci) {
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
	DBGMSG ("Soft reset finished");
}
/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}

/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}

/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	int i,ctx=0;
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	for (i=0; i<32; i++) {
		if (tmp & 1) ctx++;
		tmp >>= 1;
	}
	return ctx;
}
/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
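	/* BusOptions[15:12] is the IEEE 1394 max_rec field; the largest
	 * asynchronous payload the link accepts is 2^(max_rec+1) bytes. */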
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA channel mask */
	ohci->ir_legacy_channels = 0;

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound,
		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
	      "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
	      (unsigned long long)pci_resource_start(ohci->dev, 0),
	      (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to set max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));
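		/* The AT status timeStamp is {cycleSeconds[2:0], cycleCount[12:0]}:
		 * bits 31:25 of the cycle timer hold cycleSeconds and bits 24:12
		 * hold cycleCount, so the +1 on the seconds field above places
		 * the deadline roughly one second in the future. */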
		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                 begin=%08x %08x %08x %08x\n"
		       "                       %08x %08x %08x %08x\n"
		       "                 end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}

	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;
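		/* z is the Z descriptor count written alongside the command
		 * pointer: an immediate descriptor spans two 16-byte
		 * descriptor slots, and a payload adds one OUTPUT_LAST
		 * descriptor, hence 3 with data and 2 without. */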
		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}
/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use.  However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR,
				      "legacy IT context cannot be initialized during interrupt");
				return -EINVAL;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	case ISO_LISTEN_CHANNEL:
	{
		u64 mask;
		struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
		int ir_legacy_active;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: ISO listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR,
			      "%s: ISO listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ir_legacy_active = ohci->ir_legacy_channels;

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);

		if (!ir_legacy_active) {
			if (ohci1394_register_iso_tasklet(ohci,
					  &ohci->ir_legacy_tasklet) < 0) {
				PRINT(KERN_ERR, "No IR DMA context available");
				return -EBUSY;
			}

			/* the IR context can be assigned to any DMA context
			 * by ohci1394_register_iso_tasklet */
			d->ctx = ohci->ir_legacy_tasklet.context;
			d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
				32*d->ctx;
			d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
				32*d->ctx;
			d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
			d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;

			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);

			if (printk_ratelimit())
				DBGMSG("IR legacy activated");
		}

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening enabled on channel %d", arg);
		break;
	}
	case ISO_UNLISTEN_CHANNEL:
	{
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: ISO unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR,
			      "%s: ISO unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening disabled on channel %d", arg);

		if (ohci->ir_legacy_channels == 0) {
			stop_dma_rcv_ctx(&ohci->ir_legacy_context);
			DBGMSG("ISO legacy receive context stopped");
		}

		break;
	}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}
/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor). Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block. But
  the user API expects all packets to be contiguous. Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);

static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						((recv->nblocks+1)/iso->buf_packets);
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		     recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				  sizeof(struct dma_cmd) * recv->nblocks,
				  recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}
/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* ignore out-of-range requests (valid blocks are 0..nblocks-1) */
	if ((block < 0) || (block >= recv->nblocks))
		return;

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}
static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	/* release the memory where the packet was */
	recv->released_bytes += info->total_len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}

static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}
/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
			break;
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = recv->buf_stride;

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG("ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}
static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int count;
	int wake = 0;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
		} else if (event) {
			PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		/* sync our view of the buffer */
		dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;

			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len,
						 recv->buf_stride, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = recv->buf_stride;

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}
static void ohci_iso_recv_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE)
		ohci_iso_recv_bufferfill_task(iso, recv);
	else
		ohci_iso_recv_packetperbuf_task(iso, recv);
}

/***********************************
 * rawiso ISO transmission         *
 ***********************************/
struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;
	struct ohci1394_iso_tasklet task;
	int task_active;

	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};

/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];
	u32 unused[2];
	struct dma_cmd output_last;
};

static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
1956 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1958 struct ohci_iso_xmit *xmit = iso->hostdata;
1959 struct ti_ohci *ohci = xmit->ohci;
1961 /* disable interrupts */
1962 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1964 /* halt DMA */
1965 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1966 /* XXX the DMA context will lock up if you try to send too much data! */
1967 PRINT(KERN_ERR,
1968 "you probably exceeded the OHCI card's bandwidth limit - "
1969 "reload the module and reduce xmit bandwidth");
1973 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1975 struct ohci_iso_xmit *xmit = iso->hostdata;
1977 if (xmit->task_active) {
1978 ohci_iso_xmit_stop(iso);
1979 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1980 xmit->task_active = 0;
1983 dma_prog_region_free(&xmit->prog);
1984 kfree(xmit);
1985 iso->hostdata = NULL;
1988 static void ohci_iso_xmit_task(unsigned long data)
1990 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1991 struct ohci_iso_xmit *xmit = iso->hostdata;
1992 struct ti_ohci *ohci = xmit->ohci;
1993 int wake = 0;
1994 int count;
1996 /* check the whole buffer if necessary, starting at pkt_dma */
1997 for (count = 0; count < iso->buf_packets; count++) {
1998 int cycle;
2000 /* DMA descriptor */
2001 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
2003 /* check for new writes to xferStatus */
2004 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
2005 u8 event = xferstatus & 0x1F;
2007 if (!event) {
2008 /* packet hasn't been sent yet; we are done for now */
2009 break;
2012 if (event != 0x11)
2013 PRINT(KERN_ERR,
2014 "IT DMA error - OHCI error code 0x%02x\n", event);
2016 /* at least one packet went out, so wake up the writer */
2017 wake = 1;
2019 /* parse cycle */
2020 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
2022 /* tell the subsystem the packet has gone out */
2023 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
2025 /* reset the DMA descriptor for next time */
2026 cmd->output_last.status = 0;
2029 if (wake)
2030 hpsb_iso_wake(iso);
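/*
 * Worked example of the status decoding above (illustrative values):
 * for a written-back OUTPUT_LAST status quadlet of 0x80110234,
 *
 *	xferstatus = 0x8011  (upper 16 bits: ContextControl snapshot)
 *	event      = 0x11    (evt_ack_complete, i.e. success)
 *	cycle      = 0x0234  (low 13 bits of the timeStamp)
 *
 * Any non-zero event other than 0x11 is reported as an IT DMA error,
 * and a zero event means the packet has not been sent yet.
 */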
2033 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
2035 struct ohci_iso_xmit *xmit = iso->hostdata;
2036 struct ti_ohci *ohci = xmit->ohci;
2038 int next_i, prev_i;
2039 struct iso_xmit_cmd *next, *prev;
2041 unsigned int offset;
2042 unsigned short len;
2043 unsigned char tag, sy;
2045 /* check that the packet doesn't cross a page boundary
2046 (we could allow this if we added OUTPUT_MORE descriptor support) */
2047 if (cross_bound(info->offset, info->len)) {
2048 PRINT(KERN_ERR,
2049 "rawiso xmit: packet %u crosses a page boundary",
2050 iso->first_packet);
2051 return -EINVAL;
2054 offset = info->offset;
2055 len = info->len;
2056 tag = info->tag;
2057 sy = info->sy;
2059 /* sync up the card's view of the buffer */
2060 dma_region_sync_for_device(&iso->data_buf, offset, len);
2062 /* append first_packet to the DMA chain */
2063 /* by linking the previous descriptor to it */
2064 /* (next will become the new end of the DMA chain) */
2066 next_i = iso->first_packet;
2067 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
2069 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
2070 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
2072 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
2073 memset(next, 0, sizeof(struct iso_xmit_cmd));
2074 next->output_more_immediate.control = cpu_to_le32(0x02000008);
2076 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2078 /* tcode = 0xA, and sy */
2079 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2081 /* tag and channel number */
2082 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2084 /* transmission speed */
2085 next->iso_hdr[2] = iso->speed & 0x7;
2087 /* payload size */
2088 next->iso_hdr[6] = len & 0xFF;
2089 next->iso_hdr[7] = len >> 8;
2091 /* set up the OUTPUT_LAST */
2092 next->output_last.control = cpu_to_le32(1 << 28);
2093 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2094 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2095 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2096 next->output_last.control |= cpu_to_le32(len);
2098 /* payload bus address */
2099 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2101 /* leave branchAddress at zero for now */
2103 /* re-write the previous DMA descriptor to chain to this one */
2105 /* set prev branch address to point to next (Z=3) */
2106 prev->output_last.branchAddress = cpu_to_le32(
2107 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
2109 /* disable interrupt, unless required by the IRQ interval */
2110 if (prev_i % iso->irq_interval) {
2111 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2112 } else {
2113 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
2116 wmb();
2118 /* wake DMA in case it is sleeping */
2119 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2121 /* issue a dummy read of the cycle timer to force all PCI
2122 writes to be posted immediately */
2123 mb();
2124 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
2126 return 0;
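/*
 * Minimal sketch of the queueing behaviour above, with assumed values
 * (not driver code): for iso->buf_packets == 8 and iso->irq_interval
 * == 4, successive calls link descriptor blocks 0 -> 1 -> ... -> 7 ->
 * 0, each branchAddress carrying Z=3 in its low bits. Only blocks
 * whose index is a multiple of 4 (here 0 and 4) keep the "want
 * interrupt" bits set, so completion interrupts fire at most every
 * fourth packet, while the write of bit 12 to ContextControlSet wakes
 * the context if it went idle.
 */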
2129 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2131 struct ohci_iso_xmit *xmit = iso->hostdata;
2132 struct ti_ohci *ohci = xmit->ohci;
2134 /* clear out the control register */
2135 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2136 wmb();
2138 /* address and length of first descriptor block (Z=3) */
2139 reg_write(xmit->ohci, xmit->CommandPtr,
2140 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2142 /* cycle match */
2143 if (cycle != -1) {
2144 u32 start = cycle & 0x1FFF;
2146 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2147 just snarf them from the current time */
2148 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2150 /* advance one second to give some extra time for DMA to start */
2151 seconds += 1;
2153 start |= (seconds & 3) << 13;
2155 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2158 /* enable interrupts */
2159 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2161 /* run */
2162 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2163 mb();
2165 /* wait 100 usec to give the card time to go active */
2166 udelay(100);
2168 /* check the RUN bit */
2169 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2170 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)",
2171 reg_read(xmit->ohci, xmit->ContextControlSet));
2172 return -1;
2175 return 0;
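/*
 * Worked example of the cycleMatch encoding above (illustrative):
 * starting at cycle == 7900 while the cycle timer reads seconds == 41
 * gives
 *
 *	start = (7900 & 0x1FFF) | (((41 + 1) & 3) << 13)
 *	      = 0x1EDC | (2 << 13) = 0x5EDC
 *
 * so ContextControlSet is written with 0x80000000 | (0x5EDC << 16),
 * i.e. cycleMatchEnable plus the 15-bit (2 seconds + 13 cycle bits)
 * match value.
 */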
2178 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2181 switch (cmd) {
2182 case XMIT_INIT:
2183 return ohci_iso_xmit_init(iso);
2184 case XMIT_START:
2185 return ohci_iso_xmit_start(iso, arg);
2186 case XMIT_STOP:
2187 ohci_iso_xmit_stop(iso);
2188 return 0;
2189 case XMIT_QUEUE:
2190 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2191 case XMIT_SHUTDOWN:
2192 ohci_iso_xmit_shutdown(iso);
2193 return 0;
2195 case RECV_INIT:
2196 return ohci_iso_recv_init(iso);
2197 case RECV_START: {
2198 int *args = (int*) arg;
2199 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2201 case RECV_STOP:
2202 ohci_iso_recv_stop(iso);
2203 return 0;
2204 case RECV_RELEASE:
2205 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2206 return 0;
2207 case RECV_FLUSH:
2208 ohci_iso_recv_task((unsigned long) iso);
2209 return 0;
2210 case RECV_SHUTDOWN:
2211 ohci_iso_recv_shutdown(iso);
2212 return 0;
2213 case RECV_LISTEN_CHANNEL:
2214 ohci_iso_recv_change_channel(iso, arg, 1);
2215 return 0;
2216 case RECV_UNLISTEN_CHANNEL:
2217 ohci_iso_recv_change_channel(iso, arg, 0);
2218 return 0;
2219 case RECV_SET_CHANNEL_MASK:
2220 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2221 return 0;
2223 default:
2224 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2225 cmd);
2226 break;
2228 return -EINVAL;
2231 /***************************************
2232 * IEEE-1394 functionality section END *
2233 ***************************************/
2236 /********************************************************
2237 * Global stuff (interrupt handler, init/shutdown code) *
2238 ********************************************************/
2240 static void dma_trm_reset(struct dma_trm_ctx *d)
2242 unsigned long flags;
2243 LIST_HEAD(packet_list);
2244 struct ti_ohci *ohci = d->ohci;
2245 struct hpsb_packet *packet, *ptmp;
2247 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2249 /* Lock the context, reset it and release it. Move the packets
2250 * that were pending in the context to packet_list and complete
2251 * them after releasing the lock. */
2253 spin_lock_irqsave(&d->lock, flags);
2255 list_splice(&d->fifo_list, &packet_list);
2256 list_splice(&d->pending_list, &packet_list);
2257 INIT_LIST_HEAD(&d->fifo_list);
2258 INIT_LIST_HEAD(&d->pending_list);
2260 d->branchAddrPtr = NULL;
2261 d->sent_ind = d->prg_ind;
2262 d->free_prgs = d->num_desc;
2264 spin_unlock_irqrestore(&d->lock, flags);
2266 if (list_empty(&packet_list))
2267 return;
2269 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2271 /* Now process subsystem callbacks for the packets from this
2272 * context. */
2273 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2274 list_del_init(&packet->driver_list);
2275 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2279 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2280 quadlet_t rx_event,
2281 quadlet_t tx_event)
2283 struct ohci1394_iso_tasklet *t;
2284 unsigned long mask;
2285 unsigned long flags;
2287 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2289 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2290 mask = 1 << t->context;
2292 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2293 tasklet_schedule(&t->tasklet);
2294 else if (rx_event & mask)
2295 tasklet_schedule(&t->tasklet);
2298 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2301 static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
2303 quadlet_t event, node_id;
2304 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2305 struct hpsb_host *host = ohci->host;
2306 int phyid = -1, isroot = 0;
2307 unsigned long flags;
2309 /* Read and clear the interrupt event register. Don't clear
2310 * the busReset event, though. This is done when we get the
2311 * selfIDComplete interrupt. */
2312 spin_lock_irqsave(&ohci->event_lock, flags);
2313 event = reg_read(ohci, OHCI1394_IntEventClear);
2314 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2315 spin_unlock_irqrestore(&ohci->event_lock, flags);
2317 if (!event)
2318 return IRQ_NONE;
2320 /* If event is ~(u32)0, the CardBus card was ejected. In this
2321 * case we just return; cleanup happens later in the
2322 * ohci1394_pci_remove function. */
2323 if (event == ~(u32) 0) {
2324 DBGMSG("Device removed.");
2325 return IRQ_NONE;
2328 DBGMSG("IntEvent: %08x", event);
2330 if (event & OHCI1394_unrecoverableError) {
2331 int ctx;
2332 PRINT(KERN_ERR, "Unrecoverable error!");
2334 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2335 PRINT(KERN_ERR, "Async Req Tx Context died: "
2336 "ctrl[%08x] cmdptr[%08x]",
2337 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2338 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2340 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2341 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2342 "ctrl[%08x] cmdptr[%08x]",
2343 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2344 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2346 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2347 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2348 "ctrl[%08x] cmdptr[%08x]",
2349 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2350 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2352 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2353 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2354 "ctrl[%08x] cmdptr[%08x]",
2355 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2356 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2358 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2359 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2360 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2361 "ctrl[%08x] cmdptr[%08x]", ctx,
2362 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2363 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2366 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2367 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2368 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2369 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2370 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2371 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2372 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2375 event &= ~OHCI1394_unrecoverableError;
2377 if (event & OHCI1394_postedWriteErr) {
2378 PRINT(KERN_ERR, "physical posted write error");
2379 /* no recovery strategy yet; we would have to involve protocol drivers */
2381 if (event & OHCI1394_cycleTooLong) {
2382 if (printk_ratelimit())
2383 PRINT(KERN_WARNING, "isochronous cycle too long");
2384 else
2385 DBGMSG("OHCI1394_cycleTooLong");
2386 reg_write(ohci, OHCI1394_LinkControlSet,
2387 OHCI1394_LinkControl_CycleMaster);
2388 event &= ~OHCI1394_cycleTooLong;
2390 if (event & OHCI1394_cycleInconsistent) {
2391 /* We subscribe to the cycleInconsistent event only to
2392 * clear the corresponding event bit... otherwise,
2393 * isochronous cycleMatch DMA won't work. */
2394 DBGMSG("OHCI1394_cycleInconsistent");
2395 event &= ~OHCI1394_cycleInconsistent;
2397 if (event & OHCI1394_busReset) {
2398 /* The busReset event bit can't be cleared during the
2399 * selfID phase, so we disable busReset interrupts, to
2400 * avoid burying the cpu in interrupt requests. */
2401 spin_lock_irqsave(&ohci->event_lock, flags);
2402 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2404 if (ohci->check_busreset) {
2405 int loop_count = 0;
2407 udelay(10);
2409 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2410 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2412 spin_unlock_irqrestore(&ohci->event_lock, flags);
2413 udelay(10);
2414 spin_lock_irqsave(&ohci->event_lock, flags);
2416 /* The loop counter check is to prevent the driver
2417 * from remaining in this state forever. On the
2418 * initial bus reset the loop would otherwise run
2419 * forever and hang the system until a device is
2420 * manually plugged into or out of a port! The
2421 * forced reset solves this; it mainly affects nForce2. */
2422 if (loop_count > 10000) {
2423 ohci_devctl(host, RESET_BUS, LONG_RESET);
2424 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2425 loop_count = 0;
2428 loop_count++;
2431 spin_unlock_irqrestore(&ohci->event_lock, flags);
2432 if (!host->in_bus_reset) {
2433 DBGMSG("irq_handler: Bus reset requested");
2435 /* Subsystem call */
2436 hpsb_bus_reset(ohci->host);
2438 event &= ~OHCI1394_busReset;
2440 if (event & OHCI1394_reqTxComplete) {
2441 struct dma_trm_ctx *d = &ohci->at_req_context;
2442 DBGMSG("Got reqTxComplete interrupt "
2443 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2444 if (reg_read(ohci, d->ctrlSet) & 0x800)
2445 ohci1394_stop_context(ohci, d->ctrlClear,
2446 "reqTxComplete");
2447 else
2448 dma_trm_tasklet((unsigned long)d);
2449 //tasklet_schedule(&d->task);
2450 event &= ~OHCI1394_reqTxComplete;
2452 if (event & OHCI1394_respTxComplete) {
2453 struct dma_trm_ctx *d = &ohci->at_resp_context;
2454 DBGMSG("Got respTxComplete interrupt "
2455 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2456 if (reg_read(ohci, d->ctrlSet) & 0x800)
2457 ohci1394_stop_context(ohci, d->ctrlClear,
2458 "respTxComplete");
2459 else
2460 tasklet_schedule(&d->task);
2461 event &= ~OHCI1394_respTxComplete;
2463 if (event & OHCI1394_RQPkt) {
2464 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2465 DBGMSG("Got RQPkt interrupt status=0x%08X",
2466 reg_read(ohci, d->ctrlSet));
2467 if (reg_read(ohci, d->ctrlSet) & 0x800)
2468 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2469 else
2470 tasklet_schedule(&d->task);
2471 event &= ~OHCI1394_RQPkt;
2473 if (event & OHCI1394_RSPkt) {
2474 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2475 DBGMSG("Got RSPkt interrupt status=0x%08X",
2476 reg_read(ohci, d->ctrlSet));
2477 if (reg_read(ohci, d->ctrlSet) & 0x800)
2478 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2479 else
2480 tasklet_schedule(&d->task);
2481 event &= ~OHCI1394_RSPkt;
2483 if (event & OHCI1394_isochRx) {
2484 quadlet_t rx_event;
2486 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2487 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2488 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2489 event &= ~OHCI1394_isochRx;
2491 if (event & OHCI1394_isochTx) {
2492 quadlet_t tx_event;
2494 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2495 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2496 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2497 event &= ~OHCI1394_isochTx;
2499 if (event & OHCI1394_selfIDComplete) {
2500 if (host->in_bus_reset) {
2501 node_id = reg_read(ohci, OHCI1394_NodeID);
2503 if (!(node_id & 0x80000000)) {
2504 PRINT(KERN_ERR,
2505 "SelfID received, but NodeID invalid "
2506 "(probably new bus reset occurred): %08X",
2507 node_id);
2508 goto selfid_not_valid;
2511 phyid = node_id & 0x0000003f;
2512 isroot = (node_id & 0x40000000) != 0;
2514 DBGMSG("SelfID interrupt received "
2515 "(phyid %d, %s)", phyid,
2516 (isroot ? "root" : "not root"));
2518 handle_selfid(ohci, host, phyid, isroot);
2520 /* Clear the bus reset event and re-enable the
2521 * busReset interrupt. */
2522 spin_lock_irqsave(&ohci->event_lock, flags);
2523 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2524 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2525 spin_unlock_irqrestore(&ohci->event_lock, flags);
2527 /* Turn on phys dma reception.
2529 * TODO: Enable some sort of filtering management.
2531 if (phys_dma) {
2532 reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
2533 0xffffffff);
2534 reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
2535 0xffffffff);
2538 DBGMSG("PhyReqFilter=%08x%08x",
2539 reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2540 reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
2542 hpsb_selfid_complete(host, phyid, isroot);
2543 } else
2544 PRINT(KERN_ERR,
2545 "SelfID received outside of bus reset sequence");
2547 selfid_not_valid:
2548 event &= ~OHCI1394_selfIDComplete;
2551 /* Make sure we handle everything, just in case we accidentally
2552 * enabled an interrupt that we didn't write a handler for. */
2553 if (event)
2554 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2555 event);
2557 return IRQ_HANDLED;
2560 /* Put the buffer back into the dma context */
2561 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2563 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2564 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2566 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2567 d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
2568 idx = (idx + d->num_desc - 1) % d->num_desc;
2569 d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
2571 /* To avoid a race, ensure the 1394 interface hardware sees the inserted
2572 * context program descriptors before it sees the wakeup bit set. */
2573 wmb();
2575 /* wake up the dma context if necessary */
2576 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2577 PRINT(KERN_INFO,
2578 "Waking dma ctx=%d ... processing is probably too slow",
2579 d->ctx);
2582 /* do this always, to avoid a race condition */
2583 reg_write(ohci, d->ctrlSet, 0x1000);
2586 #define cond_le32_to_cpu(data, noswap) \
2587 (noswap ? data : le32_to_cpu(data))
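/*
 * A type-checked equivalent of the macro above, shown only as an
 * illustrative sketch with a hypothetical name (the driver keeps the
 * macro):
 */
static inline quadlet_t cond_le32_to_cpu_example(quadlet_t data, int noswap)
{
	return noswap ? data : le32_to_cpu(data);
}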
2589 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2590 -1, 0, -1, 0, -1, -1, 16, -1};
2593 * Determine the length of a packet in the buffer
2594 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2596 static inline int packet_length(struct dma_rcv_ctx *d, int idx,
2597 quadlet_t *buf_ptr, int offset,
2598 unsigned char tcode, int noswap)
2600 int length = -1;
2602 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2603 length = TCODE_SIZE[tcode];
2604 if (length == 0) {
2605 if (offset + 12 >= d->buf_size) {
2606 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2607 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2608 } else {
2609 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2611 length += 20;
2613 } else if (d->type == DMA_CTX_ISO) {
2614 /* Assumption: buffer fill mode with header/trailer */
2615 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2618 if (length > 0 && length % 4)
2619 length += 4 - (length % 4);
2621 return length;
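/*
 * Worked example (illustrative): for a block write request (tcode 0x1,
 * a zero entry in TCODE_SIZE) the payload length comes from the upper
 * 16 bits of the fourth header quadlet. With data_length == 8 the
 * result is 8 + 20 = 28 bytes - a 16-byte header, the payload, and the
 * 4-byte status quadlet the AR DMA unit appends - already a multiple
 * of 4, so no padding is added.
 */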
2624 /* Tasklet that processes dma receive buffers */
2625 static void dma_rcv_tasklet (unsigned long data)
2627 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2628 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2629 unsigned int split_left, idx, offset, rescount;
2630 unsigned char tcode;
2631 int length, bytes_left, ack;
2632 unsigned long flags;
2633 quadlet_t *buf_ptr;
2634 char *split_ptr;
2635 char msg[256];
2637 spin_lock_irqsave(&d->lock, flags);
2639 idx = d->buf_ind;
2640 offset = d->buf_offset;
2641 buf_ptr = d->buf_cpu[idx] + offset/4;
2643 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2644 bytes_left = d->buf_size - rescount - offset;
2646 while (bytes_left > 0) {
2647 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2649 /* packet_length() will return < 4 for an error */
2650 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2652 if (length < 4) { /* something is wrong */
2653 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2654 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2655 d->ctx, length);
2656 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2657 spin_unlock_irqrestore(&d->lock, flags);
2658 return;
2661 /* The first case is where we have a packet that crosses
2662 * over more than one descriptor. The next case is where
2663 * it's all in the first descriptor. */
2664 if ((offset + length) > d->buf_size) {
2665 DBGMSG("Split packet rcv'd");
2666 if (length > d->split_buf_size) {
2667 ohci1394_stop_context(ohci, d->ctrlClear,
2668 "Split packet size exceeded");
2669 d->buf_ind = idx;
2670 d->buf_offset = offset;
2671 spin_unlock_irqrestore(&d->lock, flags);
2672 return;
2675 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2676 == d->buf_size) {
2677 /* The other part of the packet has not been
2678 * written yet; this should never happen, and
2679 * in any case we will get it on the next call. */
2680 PRINT(KERN_INFO,
2681 "Got only half a packet!");
2682 d->buf_ind = idx;
2683 d->buf_offset = offset;
2684 spin_unlock_irqrestore(&d->lock, flags);
2685 return;
2688 split_left = length;
2689 split_ptr = (char *)d->spb;
2690 memcpy(split_ptr, buf_ptr, d->buf_size - offset);
2691 split_left -= d->buf_size - offset;
2692 split_ptr += d->buf_size - offset;
2693 insert_dma_buffer(d, idx);
2694 idx = (idx+1) % d->num_desc;
2695 buf_ptr = d->buf_cpu[idx];
2696 offset = 0;
2698 while (split_left >= d->buf_size) {
2699 memcpy(split_ptr, buf_ptr, d->buf_size);
2700 split_ptr += d->buf_size;
2701 split_left -= d->buf_size;
2702 insert_dma_buffer(d, idx);
2703 idx = (idx+1) % d->num_desc;
2704 buf_ptr = d->buf_cpu[idx];
2707 if (split_left > 0) {
2708 memcpy(split_ptr, buf_ptr, split_left);
2709 offset = split_left;
2710 buf_ptr += offset/4;
2712 } else {
2713 DBGMSG("Single packet rcv'd");
2714 memcpy(d->spb, buf_ptr, length);
2715 offset += length;
2716 buf_ptr += length/4;
2717 if (offset == d->buf_size) {
2718 insert_dma_buffer(d, idx);
2719 idx = (idx+1) % d->num_desc;
2720 buf_ptr = d->buf_cpu[idx];
2721 offset = 0;
2725 /* We get one PHY packet in the async request context for
2726 * each bus reset; we always ignore it. */
2727 if (tcode != OHCI1394_TCODE_PHY) {
2728 if (!ohci->no_swap_incoming)
2729 header_le32_to_cpu(d->spb, tcode);
2730 DBGMSG("Packet received from node"
2731 " %d ack=0x%02X spd=%d tcode=0x%X"
2732 " length=%d ctx=%d tlabel=%d",
2733 (d->spb[1]>>16)&0x3f,
2734 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2735 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2736 tcode, length, d->ctx,
2737 (d->spb[0]>>10)&0x3f);
2739 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2740 == 0x11) ? 1 : 0;
2742 hpsb_packet_received(ohci->host, d->spb,
2743 length-4, ack);
2745 #ifdef OHCI1394_DEBUG
2746 else
2747 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2748 d->ctx);
2749 #endif
2751 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2753 bytes_left = d->buf_size - rescount - offset;
2757 d->buf_ind = idx;
2758 d->buf_offset = offset;
2760 spin_unlock_irqrestore(&d->lock, flags);
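/*
 * Illustrative numbers for the residual-count bookkeeping above: with
 * buf_size == 4096, a descriptor status of rescount == 1024 means the
 * controller has filled 4096 - 1024 == 3072 bytes of that buffer. If
 * 3000 of those were already consumed (offset == 3000), bytes_left is
 * 72 and the loop keeps parsing packets until it catches up with the
 * hardware.
 */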
2763 /* Bottom half that processes sent packets */
2764 static void dma_trm_tasklet (unsigned long data)
2766 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2767 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2768 struct hpsb_packet *packet, *ptmp;
2769 unsigned long flags;
2770 u32 status, ack;
2771 size_t datasize;
2773 spin_lock_irqsave(&d->lock, flags);
2775 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2776 datasize = packet->data_size;
2777 if (datasize && packet->type != hpsb_raw)
2778 status = le32_to_cpu(
2779 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2780 else
2781 status = le32_to_cpu(
2782 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2784 if (status == 0)
2785 /* this packet hasn't been sent yet */
2786 break;
2788 #ifdef OHCI1394_DEBUG
2789 if (datasize)
2790 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2791 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2792 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2793 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2794 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2795 status&0x1f, (status>>5)&0x3,
2796 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2797 d->ctx);
2798 else
2799 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2800 "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2801 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2802 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2803 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2804 status&0x1f, (status>>5)&0x3,
2805 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2806 d->ctx);
2807 else
2808 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2809 "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2810 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2811 >>16)&0x3f,
2812 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2813 >>4)&0xf,
2814 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2815 >>10)&0x3f,
2816 status&0x1f, (status>>5)&0x3,
2817 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2818 d->ctx);
2819 #endif
2821 if (status & 0x10) {
2822 ack = status & 0xf;
2823 } else {
2824 switch (status & 0x1f) {
2825 case EVT_NO_STATUS: /* that should never happen */
2826 case EVT_RESERVED_A: /* that should never happen */
2827 case EVT_LONG_PACKET: /* that should never happen */
2828 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2829 ack = ACKX_SEND_ERROR;
2830 break;
2831 case EVT_MISSING_ACK:
2832 ack = ACKX_TIMEOUT;
2833 break;
2834 case EVT_UNDERRUN:
2835 ack = ACKX_SEND_ERROR;
2836 break;
2837 case EVT_OVERRUN: /* that should never happen */
2838 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2839 ack = ACKX_SEND_ERROR;
2840 break;
2841 case EVT_DESCRIPTOR_READ:
2842 case EVT_DATA_READ:
2843 case EVT_DATA_WRITE:
2844 ack = ACKX_SEND_ERROR;
2845 break;
2846 case EVT_BUS_RESET: /* that should never happen */
2847 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2848 ack = ACKX_SEND_ERROR;
2849 break;
2850 case EVT_TIMEOUT:
2851 ack = ACKX_TIMEOUT;
2852 break;
2853 case EVT_TCODE_ERR:
2854 ack = ACKX_SEND_ERROR;
2855 break;
2856 case EVT_RESERVED_B: /* that should never happen */
2857 case EVT_RESERVED_C: /* that should never happen */
2858 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2859 ack = ACKX_SEND_ERROR;
2860 break;
2861 case EVT_UNKNOWN:
2862 case EVT_FLUSHED:
2863 ack = ACKX_SEND_ERROR;
2864 break;
2865 default:
2866 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2867 ack = ACKX_SEND_ERROR;
2868 BUG();
2872 list_del_init(&packet->driver_list);
2873 hpsb_packet_sent(ohci->host, packet, ack);
2875 if (datasize) {
2876 pci_unmap_single(ohci->dev,
2877 le32_to_cpu(d->prg_cpu[d->sent_ind]->end.address),
2878 datasize, PCI_DMA_TODEVICE);
2879 OHCI_DMA_FREE("single Xmit data packet");
2882 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2883 d->free_prgs++;
2886 dma_trm_flush(ohci, d);
2888 spin_unlock_irqrestore(&d->lock, flags);
2891 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2893 if (d->ctrlClear) {
2894 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2896 if (d->type == DMA_CTX_ISO) {
2897 /* disable interrupts */
2898 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2899 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2900 } else {
2901 tasklet_kill(&d->task);
2907 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2909 int i;
2910 struct ti_ohci *ohci = d->ohci;
2912 if (ohci == NULL)
2913 return;
2915 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2917 if (d->buf_cpu) {
2918 for (i=0; i<d->num_desc; i++)
2919 if (d->buf_cpu[i] && d->buf_bus[i]) {
2920 pci_free_consistent(
2921 ohci->dev, d->buf_size,
2922 d->buf_cpu[i], d->buf_bus[i]);
2923 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2925 kfree(d->buf_cpu);
2926 kfree(d->buf_bus);
2928 if (d->prg_cpu) {
2929 for (i=0; i<d->num_desc; i++)
2930 if (d->prg_cpu[i] && d->prg_bus[i]) {
2931 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2932 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2934 pci_pool_destroy(d->prg_pool);
2935 OHCI_DMA_FREE("dma_rcv prg pool");
2936 kfree(d->prg_cpu);
2937 kfree(d->prg_bus);
2939 kfree(d->spb);
2941 /* Mark this context as freed. */
2942 d->ohci = NULL;
2945 static int
2946 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2947 enum context_type type, int ctx, int num_desc,
2948 int buf_size, int split_buf_size, int context_base)
2950 int i, len;
2951 static int num_allocs;
2952 static char pool_name[20];
2954 d->ohci = ohci;
2955 d->type = type;
2956 d->ctx = ctx;
2958 d->num_desc = num_desc;
2959 d->buf_size = buf_size;
2960 d->split_buf_size = split_buf_size;
2962 d->ctrlSet = 0;
2963 d->ctrlClear = 0;
2964 d->cmdPtr = 0;
2966 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2967 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2969 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2970 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2971 free_dma_rcv_ctx(d);
2972 return -ENOMEM;
2975 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2976 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2978 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2979 PRINT(KERN_ERR, "Failed to allocate dma prg");
2980 free_dma_rcv_ctx(d);
2981 return -ENOMEM;
2984 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2986 if (d->spb == NULL) {
2987 PRINT(KERN_ERR, "Failed to allocate split buffer");
2988 free_dma_rcv_ctx(d);
2989 return -ENOMEM;
2992 len = sprintf(pool_name, "ohci1394_rcv_prg");
2993 sprintf(pool_name+len, "%d", num_allocs);
2994 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2995 sizeof(struct dma_cmd), 4, 0);
2996 if (d->prg_pool == NULL)
2998 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2999 free_dma_rcv_ctx(d);
3000 return -ENOMEM;
3002 num_allocs++;
3004 OHCI_DMA_ALLOC("dma_rcv prg pool");
3006 for (i=0; i<d->num_desc; i++) {
3007 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3008 d->buf_size,
3009 d->buf_bus+i);
3010 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3012 if (d->buf_cpu[i] != NULL) {
3013 memset(d->buf_cpu[i], 0, d->buf_size);
3014 } else {
3015 PRINT(KERN_ERR,
3016 "Failed to allocate dma buffer");
3017 free_dma_rcv_ctx(d);
3018 return -ENOMEM;
3021 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
3022 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3024 if (d->prg_cpu[i] != NULL) {
3025 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3026 } else {
3027 PRINT(KERN_ERR,
3028 "Failed to allocate dma prg");
3029 free_dma_rcv_ctx(d);
3030 return -ENOMEM;
3034 spin_lock_init(&d->lock);
3036 if (type == DMA_CTX_ISO) {
3037 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3038 OHCI_ISO_MULTICHANNEL_RECEIVE,
3039 dma_rcv_tasklet, (unsigned long) d);
3040 } else {
3041 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3042 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3043 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3045 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3048 return 0;
3051 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3053 int i;
3054 struct ti_ohci *ohci = d->ohci;
3056 if (ohci == NULL)
3057 return;
3059 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3061 if (d->prg_cpu) {
3062 for (i=0; i<d->num_desc; i++)
3063 if (d->prg_cpu[i] && d->prg_bus[i]) {
3064 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3065 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3067 pci_pool_destroy(d->prg_pool);
3068 OHCI_DMA_FREE("dma_trm prg pool");
3069 kfree(d->prg_cpu);
3070 kfree(d->prg_bus);
3073 /* Mark this context as freed. */
3074 d->ohci = NULL;
3077 static int
3078 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3079 enum context_type type, int ctx, int num_desc,
3080 int context_base)
3082 int i, len;
3083 static char pool_name[20];
3084 static int num_allocs;
3086 d->ohci = ohci;
3087 d->type = type;
3088 d->ctx = ctx;
3089 d->num_desc = num_desc;
3090 d->ctrlSet = 0;
3091 d->ctrlClear = 0;
3092 d->cmdPtr = 0;
3094 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3095 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3097 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3098 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3099 free_dma_trm_ctx(d);
3100 return -ENOMEM;
3103 len = sprintf(pool_name, "ohci1394_trm_prg");
3104 sprintf(pool_name+len, "%d", num_allocs);
3105 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3106 sizeof(struct at_dma_prg), 4, 0);
3107 if (d->prg_pool == NULL) {
3108 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3109 free_dma_trm_ctx(d);
3110 return -ENOMEM;
3112 num_allocs++;
3114 OHCI_DMA_ALLOC("dma_rcv prg pool");
3116 for (i = 0; i < d->num_desc; i++) {
3117 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
3118 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3120 if (d->prg_cpu[i] != NULL) {
3121 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3122 } else {
3123 PRINT(KERN_ERR,
3124 "Failed to allocate at dma prg");
3125 free_dma_trm_ctx(d);
3126 return -ENOMEM;
3130 spin_lock_init(&d->lock);
3132 /* initialize tasklet */
3133 if (type == DMA_CTX_ISO) {
3134 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3135 dma_trm_tasklet, (unsigned long) d);
3136 if (ohci1394_register_iso_tasklet(ohci,
3137 &ohci->it_legacy_tasklet) < 0) {
3138 PRINT(KERN_ERR, "No IT DMA context available");
3139 free_dma_trm_ctx(d);
3140 return -EBUSY;
3143 /* IT can be assigned to any context by register_iso_tasklet */
3144 d->ctx = ohci->it_legacy_tasklet.context;
3145 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3146 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3147 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3148 } else {
3149 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3150 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3151 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3152 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3155 return 0;
3158 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3160 struct ti_ohci *ohci = host->hostdata;
3162 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3163 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3165 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3169 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3170 quadlet_t data, quadlet_t compare)
3172 struct ti_ohci *ohci = host->hostdata;
3173 int i;
3175 reg_write(ohci, OHCI1394_CSRData, data);
3176 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3177 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3179 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3180 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3181 break;
3183 mdelay(1);
3186 return reg_read(ohci, OHCI1394_CSRData);
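/*
 * Hypothetical usage sketch (not driver code): the sequence above is a
 * hardware compare-and-swap on the serial-bus CSRs. Assuming csrSel 1
 * selects BANDWIDTH_AVAILABLE as in the OHCI spec, claiming 100
 * bandwidth units, with 'old' holding the previously read value:
 *
 *	ret = ohci_hw_csr_reg(host, 1, old - 100, old);
 *
 * The claim succeeded iff ret == old; otherwise retry with ret as the
 * new 'old'.
 */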
3189 static struct hpsb_host_driver ohci1394_driver = {
3190 .owner = THIS_MODULE,
3191 .name = OHCI1394_DRIVER_NAME,
3192 .set_hw_config_rom = ohci_set_hw_config_rom,
3193 .transmit_packet = ohci_transmit,
3194 .devctl = ohci_devctl,
3195 .isoctl = ohci_isoctl,
3196 .hw_csr_reg = ohci_hw_csr_reg,
3199 /***********************************
3200 * PCI Driver Interface functions *
3201 ***********************************/
3203 #define FAIL(err, fmt, args...) \
3204 do { \
3205 PRINT_G(KERN_ERR, fmt , ## args); \
3206 ohci1394_pci_remove(dev); \
3207 return err; \
3208 } while (0)
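/*
 * Note: FAIL() is safe at any point during the probe because
 * ohci1394_pci_remove() tears down strictly according to
 * ohci->init_state, so a partially initialized device only releases
 * what it has acquired so far. For example,
 * FAIL(-ENOMEM, "Failed to allocate host structure") expands to the
 * error print, the staged remove, and 'return -ENOMEM;'.
 */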
3210 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3211 const struct pci_device_id *ent)
3213 struct hpsb_host *host;
3214 struct ti_ohci *ohci; /* shortcut to currently handled device */
3215 resource_size_t ohci_base;
3217 #ifdef CONFIG_PPC_PMAC
3218 /* Necessary on some machines if ohci1394 was loaded/unloaded before */
3219 if (machine_is(powermac)) {
3220 struct device_node *ofn = pci_device_to_OF_node(dev);
3222 if (ofn) {
3223 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3224 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3227 #endif /* CONFIG_PPC_PMAC */
3229 if (pci_enable_device(dev))
3230 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3231 pci_set_master(dev);
3233 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3234 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3236 ohci = host->hostdata;
3237 ohci->dev = dev;
3238 ohci->host = host;
3239 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3240 host->pdev = dev;
3241 pci_set_drvdata(dev, ohci);
3243 /* We don't want hardware swapping */
3244 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3246 /* Some oddball Apple controllers do not order the self-ID
3247 * data properly, so we compensate for it here. */
3248 #ifndef __LITTLE_ENDIAN
3249 /* XXX: Need a better way to check this. I'm wondering if we can
3250 * read the values of the OHCI1394_PCI_HCI_Control and the
3251 * noByteSwapData registers to see if they were not cleared to
3252 * zero. Should this work? Obviously it's not defined what these
3253 * registers will read when they aren't supported. Bleh! */
3254 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3255 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3256 ohci->no_swap_incoming = 1;
3257 ohci->selfid_swap = 0;
3258 } else
3259 ohci->selfid_swap = 1;
3260 #endif
3263 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3264 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3265 #endif
3267 /* These chipsets require a bit of extra care when checking after
3268 * a busreset. */
3269 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3270 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3271 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3272 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3273 ohci->check_busreset = 1;
3275 /* We hardwire the MMIO length, since some CardBus adapters
3276 * fail to report the right length. Anyway, the OHCI spec
3277 * clearly says it's 2 KB, so this shouldn't be a problem. */
3278 ohci_base = pci_resource_start(dev, 0);
3279 if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3280 PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
3281 (unsigned long long)pci_resource_len(dev, 0));
3283 if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3284 OHCI1394_DRIVER_NAME))
3285 FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
3286 (unsigned long long)ohci_base,
3287 (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3288 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3290 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3291 if (ohci->registers == NULL)
3292 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3293 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3294 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3296 /* csr_config rom allocation */
3297 ohci->csr_config_rom_cpu =
3298 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3299 &ohci->csr_config_rom_bus);
3300 OHCI_DMA_ALLOC("consistent csr_config_rom");
3301 if (ohci->csr_config_rom_cpu == NULL)
3302 FAIL(-ENOMEM, "Failed to allocate config ROM buffer");
3303 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3305 /* self-id dma buffer allocation */
3306 ohci->selfid_buf_cpu =
3307 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3308 &ohci->selfid_buf_bus);
3309 OHCI_DMA_ALLOC("consistent selfid_buf");
3311 if (ohci->selfid_buf_cpu == NULL)
3312 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3313 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3315 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3316 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on an "
3317 "8 KB boundary... may cause problems on some CXD3222 chips",
3318 ohci->selfid_buf_cpu);
3320 /* No self-id errors at startup */
3321 ohci->self_id_errors = 0;
3323 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3324 /* AR DMA request context allocation */
3325 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3326 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3327 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3328 OHCI1394_AsReqRcvContextBase) < 0)
3329 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3331 /* AR DMA response context allocation */
3332 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3333 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3334 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3335 OHCI1394_AsRspRcvContextBase) < 0)
3336 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3338 /* AT DMA request context */
3339 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3340 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3341 OHCI1394_AsReqTrContextBase) < 0)
3342 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3344 /* AT DMA response context */
3345 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3346 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3347 OHCI1394_AsRspTrContextBase) < 0)
3348 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3350 /* Start off with a soft reset, to clear everything to a sane
3351 * state. */
3352 ohci_soft_reset(ohci);
3354 /* Now enable LPS, which we need in order to start accessing
3355 * most of the registers. In fact, on some cards (ALI M5251),
3356 * accessing registers in the SClk domain without LPS enabled
3357 * will lock up the machine. Wait 50msec to make sure we have
3358 * full link enabled. */
3359 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3361 /* Disable and clear interrupts */
3362 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3363 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3365 mdelay(50);
3367 /* Determine the number of available IR and IT contexts. */
3368 ohci->nb_iso_rcv_ctx =
3369 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3370 ohci->nb_iso_xmit_ctx =
3371 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3373 /* Set the usage bits for non-existent contexts so they can't
3374 * be allocated */
3375 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3376 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
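/* e.g. nb_iso_rcv_ctx == 4 yields ir_ctx_usage == ~0 << 4, leaving
 * only bits 0..3 clear, so only contexts 0..3 can ever be claimed */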
3378 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3379 spin_lock_init(&ohci->iso_tasklet_list_lock);
3380 ohci->ISO_channel_usage = 0;
3381 spin_lock_init(&ohci->IR_channel_lock);
3383 /* Allocate the IR DMA context right here so we don't have
3384 * to do it in the interrupt path - note that this doesn't
3385 * waste much memory and avoids the juggling required to
3386 * allocate it in the IRQ path. */
3387 if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3388 DMA_CTX_ISO, 0, IR_NUM_DESC,
3389 IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3390 OHCI1394_IsoRcvContextBase) < 0) {
3391 FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3394 /* We hopefully don't have to pre-allocate IT DMA like we did
3395 * for IR DMA above. Allocate it on-demand and mark inactive. */
3396 ohci->it_legacy_context.ohci = NULL;
3397 spin_lock_init(&ohci->event_lock);
3400 * Interrupts are disabled, all right, but due to IRQF_SHARED our
3401 * handler might get called anyway. It will see no event, of course,
3402 * but it must get as far as that "no event", so enough state must be
3403 * initialized by this point.
3405 if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3406 OHCI1394_DRIVER_NAME, ohci))
3407 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3409 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3410 ohci_initialize(ohci);
3412 /* Set certain csr values */
3413 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3414 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3415 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3416 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3417 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3419 if (phys_dma) {
3420 host->low_addr_space =
3421 (u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
3422 if (!host->low_addr_space)
3423 host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
3425 host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3427 /* Tell the highlevel this host is ready */
3428 if (hpsb_add_host(host))
3429 FAIL(-ENOMEM, "Failed to register host with highlevel");
3431 ohci->init_state = OHCI_INIT_DONE;
3433 return 0;
3434 #undef FAIL
3437 static void ohci1394_pci_remove(struct pci_dev *pdev)
3439 struct ti_ohci *ohci;
3440 struct device *dev;
3442 ohci = pci_get_drvdata(pdev);
3443 if (!ohci)
3444 return;
3446 dev = get_device(&ohci->host->device);
3448 switch (ohci->init_state) {
3449 case OHCI_INIT_DONE:
3450 hpsb_remove_host(ohci->host);
3452 /* Clear out BUS Options */
3453 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3454 reg_write(ohci, OHCI1394_BusOptions,
3455 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3456 0x00ff0000);
3457 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3459 case OHCI_INIT_HAVE_IRQ:
3460 /* Clear interrupt registers */
3461 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3462 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3463 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3464 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3465 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3466 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3468 /* Disable IRM Contender */
3469 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3471 /* Clear link control register */
3472 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3474 /* Let all other nodes know to ignore us */
3475 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3477 /* Soft reset before we start - this disables
3478 * interrupts and clears linkEnable and LPS. */
3479 ohci_soft_reset(ohci);
3480 free_irq(ohci->dev->irq, ohci);
3482 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3483 /* ohci_soft_reset() has already stopped all DMA contexts,
3484 * so we don't need to stop them here. */
3485 free_dma_rcv_ctx(&ohci->ar_req_context);
3486 free_dma_rcv_ctx(&ohci->ar_resp_context);
3487 free_dma_trm_ctx(&ohci->at_req_context);
3488 free_dma_trm_ctx(&ohci->at_resp_context);
3489 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3490 free_dma_trm_ctx(&ohci->it_legacy_context);
3492 case OHCI_INIT_HAVE_SELFID_BUFFER:
3493 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3494 ohci->selfid_buf_cpu,
3495 ohci->selfid_buf_bus);
3496 OHCI_DMA_FREE("consistent selfid_buf");
3498 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3499 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3500 ohci->csr_config_rom_cpu,
3501 ohci->csr_config_rom_bus);
3502 OHCI_DMA_FREE("consistent csr_config_rom");
3504 case OHCI_INIT_HAVE_IOMAPPING:
3505 iounmap(ohci->registers);
3507 case OHCI_INIT_HAVE_MEM_REGION:
3508 release_mem_region(pci_resource_start(ohci->dev, 0),
3509 OHCI1394_REGISTER_SIZE);
3511 #ifdef CONFIG_PPC_PMAC
3512 /* On UniNorth, power down the cable and turn off the chip clock
3513 * to save power on laptops */
3514 if (machine_is(powermac)) {
3515 struct device_node* ofn = pci_device_to_OF_node(ohci->dev);
3517 if (ofn) {
3518 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3519 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3522 #endif /* CONFIG_PPC_PMAC */
3524 case OHCI_INIT_ALLOC_HOST:
3525 pci_set_drvdata(ohci->dev, NULL);
3528 if (dev)
3529 put_device(dev);
3532 #ifdef CONFIG_PM
3533 static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3535 int err;
3536 struct ti_ohci *ohci = pci_get_drvdata(pdev);
3538 if (!ohci) {
3539 printk(KERN_ERR "%s: tried to suspend nonexistent host\n",
3540 OHCI1394_DRIVER_NAME);
3541 return -ENXIO;
3543 DBGMSG("suspend called");
3545 /* Clear the async DMA contexts and stop using the controller */
3546 hpsb_bus_reset(ohci->host);
3548 /* See ohci1394_pci_remove() for comments on this sequence */
3549 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3550 reg_write(ohci, OHCI1394_BusOptions,
3551 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3552 0x00ff0000);
3553 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3554 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3555 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3556 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3557 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3558 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3559 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3560 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3561 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3562 ohci_soft_reset(ohci);
3564 err = pci_save_state(pdev);
3565 if (err) {
3566 PRINT(KERN_ERR, "pci_save_state failed with %d", err);
3567 return err;
3569 err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
3570 if (err)
3571 DBGMSG("pci_set_power_state failed with %d", err);
3573 /* PowerMac suspend code comes last */
3574 #ifdef CONFIG_PPC_PMAC
3575 if (machine_is(powermac)) {
3576 struct device_node *ofn = pci_device_to_OF_node(pdev);
3578 if (ofn)
3579 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3581 #endif /* CONFIG_PPC_PMAC */
3583 return 0;
3586 static int ohci1394_pci_resume(struct pci_dev *pdev)
3588 int err;
3589 struct ti_ohci *ohci = pci_get_drvdata(pdev);
3591 if (!ohci) {
3592 printk(KERN_ERR "%s: tried to resume nonexistent host\n",
3593 OHCI1394_DRIVER_NAME);
3594 return -ENXIO;
3596 DBGMSG("resume called");
3598 /* PowerMac resume code comes first */
3599 #ifdef CONFIG_PPC_PMAC
3600 if (machine_is(powermac)) {
3601 struct device_node *ofn = pci_device_to_OF_node(pdev);
3603 if (ofn)
3604 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3606 #endif /* CONFIG_PPC_PMAC */
3608 pci_set_power_state(pdev, PCI_D0);
3609 pci_restore_state(pdev);
3610 err = pci_enable_device(pdev);
3611 if (err) {
3612 PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
3613 return err;
3616 /* See ohci1394_pci_probe() for comments on this sequence */
3617 ohci_soft_reset(ohci);
3618 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3619 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3620 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3621 mdelay(50);
3622 ohci_initialize(ohci);
3624 hpsb_resume_host(ohci->host);
3625 return 0;
3627 #endif /* CONFIG_PM */
3629 static struct pci_device_id ohci1394_pci_tbl[] = {
3631 .class = PCI_CLASS_SERIAL_FIREWIRE_OHCI,
3632 .class_mask = PCI_ANY_ID,
3633 .vendor = PCI_ANY_ID,
3634 .device = PCI_ANY_ID,
3635 .subvendor = PCI_ANY_ID,
3636 .subdevice = PCI_ANY_ID,
3638 { 0, },
3641 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3643 static struct pci_driver ohci1394_pci_driver = {
3644 .name = OHCI1394_DRIVER_NAME,
3645 .id_table = ohci1394_pci_tbl,
3646 .probe = ohci1394_pci_probe,
3647 .remove = ohci1394_pci_remove,
3648 #ifdef CONFIG_PM
3649 .resume = ohci1394_pci_resume,
3650 .suspend = ohci1394_pci_suspend,
3651 #endif
3654 /***********************************
3655 * OHCI1394 Video Interface *
3656 ***********************************/
3658 /* Essentially the only purpose of this code is to allow another
3659 module to hook into ohci1394's interrupt handler */
3661 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3663 int i = 0;
3665 /* stop the channel program if it's still running */
3666 reg_write(ohci, reg, 0x8000);
3668 /* Wait until it effectively stops */
3669 while (reg_read(ohci, reg) & 0x400) {
3670 i++;
3671 if (i > 5000) {
3672 PRINT(KERN_ERR,
3673 "Runaway loop while stopping context: %s...", msg ? msg : "");
3674 return 1;
3677 mb();
3678 udelay(10);
3680 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3681 return 0;
3684 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3685 void (*func)(unsigned long), unsigned long data)
3687 tasklet_init(&tasklet->tasklet, func, data);
3688 tasklet->type = type;
3689 /* We init the tasklet->link field, so we can list_del() it
3690 * without worrying whether it was added to the list or not. */
3691 INIT_LIST_HEAD(&tasklet->link);
3694 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3695 struct ohci1394_iso_tasklet *tasklet)
3697 unsigned long flags, *usage;
3698 int n, i, r = -EBUSY;
3700 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3701 n = ohci->nb_iso_xmit_ctx;
3702 usage = &ohci->it_ctx_usage;
3704 else {
3705 n = ohci->nb_iso_rcv_ctx;
3706 usage = &ohci->ir_ctx_usage;
3708 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3709 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3710 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3711 return r;
3716 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3718 for (i = 0; i < n; i++)
3719 if (!test_and_set_bit(i, usage)) {
3720 tasklet->context = i;
3721 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3722 r = 0;
3723 break;
3726 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3728 return r;
3731 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3732 struct ohci1394_iso_tasklet *tasklet)
3734 unsigned long flags;
3736 tasklet_kill(&tasklet->tasklet);
3738 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3740 if (tasklet->type == OHCI_ISO_TRANSMIT)
3741 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3742 else {
3743 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3745 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3746 clear_bit(0, &ohci->ir_multichannel_used);
3750 list_del(&tasklet->link);
3752 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3755 EXPORT_SYMBOL(ohci1394_stop_context);
3756 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3757 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3758 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
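/*
 * Minimal usage sketch for the exported hooks above, as an external
 * module might use them (hypothetical names, error handling elided):
 *
 *	struct ohci1394_iso_tasklet t;
 *
 *	ohci1394_init_iso_tasklet(&t, OHCI_ISO_RECEIVE,
 *				  my_rx_bottom_half, (unsigned long)priv);
 *	if (ohci1394_register_iso_tasklet(ohci, &t) < 0)
 *		return -EBUSY;
 *	... t.context now names the claimed context; when done:
 *	ohci1394_unregister_iso_tasklet(ohci, &t);
 */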
3760 /***********************************
3761 * General module initialization *
3762 ***********************************/
3764 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3765 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3766 MODULE_LICENSE("GPL");
3768 static void __exit ohci1394_cleanup (void)
3770 pci_unregister_driver(&ohci1394_pci_driver);
3773 static int __init ohci1394_init(void)
3775 return pci_register_driver(&ohci1394_pci_driver);
3778 /* Register before most other device drivers.
3779 * Useful for remote debugging via physical DMA, e.g. using firescope. */
3780 fs_initcall(ohci1394_init);
3781 module_exit(ohci1394_cleanup);