drivers/ieee1394/ohci1394.c
/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                        Gord Peters <GordPeters@smarttech.com>
 *              2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . Iso Receive
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Iso Transmit
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 *
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionalities
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *  . Bug fixes
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "dma.h"
#include "iso.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

#ifdef DBGMSG
#undef DBGMSG
#endif

#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...)
#endif

#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)
#endif
/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);
#ifndef __LITTLE_ENDIAN
static unsigned hdr_sizes[] =
{
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* ??? */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START (???) */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
};

/* Swap headers */
static inline void packet_swab(quadlet_t *data, int tcode)
{
	size_t size;

	/* range-check tcode before indexing into hdr_sizes */
	if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
		return;

	size = hdr_sizes[tcode];
	while (size--)
		data[size] = swab32(data[size]);
}
#else
/* Don't waste cycles on same sex byte swaps */
#define packet_swab(w,x)
#endif /* !LITTLE_ENDIAN */
/***********************************
 * IEEE-1394 functionality section *
 ***********************************/

static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}
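
/*
 * Editorial note on the PhyControl accesses above and below (based on
 * the OHCI 1.0/1.1 register layout): bit 15 (0x00008000) is rdReg and
 * bit 31 (0x80000000) is rdDone, so get_phy_reg() kicks off a read
 * and polls for completion before pulling rdData out of bits 23:16.
 * set_phy_reg() sets wrReg (bit 14, 0x00004000) instead, which the
 * controller clears once the write to the PHY register has completed.
 */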
static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return;
}
/* Or's our value into the current value */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
	u8 old;

	old = get_phy_reg (ohci, addr);
	old |= data;
	set_phy_reg (ohci, addr, old);

	return;
}
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
			  int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response.  */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
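
/*
 * Editorial note: on the wire, each self-ID quadlet is followed by
 * its bitwise complement, which is why the loop above accepts a pair
 * only when q0 == ~q1 and always advances two quadlets at a time.
 * The generation field in SelfIDCount bits 23:16 is compared with the
 * copy stored in the buffer's first quadlet to catch a buffer that a
 * new bus reset overwrote while we were reading it.
 */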
static void ohci_soft_reset(struct ti_ohci *ohci) {
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}

	DBGMSG ("Soft reset finished");
}
/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
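
/*
 * Editorial note: the low-order bits or'ed into branchAddress and
 * cmdPtr above are the Z value, i.e. the number of descriptors at the
 * branch target.  Each receive program here is a single INPUT_MORE
 * descriptor, hence Z=1; the last descriptor branches back to the
 * first with Z=0, so the hardware stalls there instead of looping
 * until software re-arms the ring.
 */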
/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}
/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	int i,ctx=0;
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	for (i=0; i<32; i++) {
		if (tmp & 1) ctx++;
		tmp >>= 1;
	}
	return ctx;
}
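
/*
 * Editorial note: this probing works because the IsoRecv/IsoXmit
 * interrupt mask registers only implement bits for contexts that
 * exist in the silicon; writing all ones and reading the register
 * back therefore leaves exactly one bit set per implemented context,
 * which the loop above counts.
 */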
/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
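
	/*
	 * Editorial note: bits 15:12 of BusOptions hold the max_rec
	 * field, and IEEE 1394 defines the largest asynchronous
	 * payload as 2^(max_rec + 1) bytes - exactly what the shift
	 * above computes (e.g. max_rec = 8 gives 512 bytes).
	 */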
	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA channel mask */
	ohci->ir_legacy_channels = 0;

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleInconsistent);
	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf (irq_buf, "%d", ohci->dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s]  "
	      "MMIO=[%lx-%lx]  Max Packet=[%d]  IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);
	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}
	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to set max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
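
		/*
		 * Editorial note on the encoding above: the cycle
		 * timer register keeps cycleSeconds in bits 31:25 and
		 * cycleCount in bits 24:12.  The AT timeStamp field
		 * wants the three low seconds bits in bits 15:13 and
		 * the 13-bit cycle count in bits 12:0, so adding 1 to
		 * the seconds field places the deadline roughly one
		 * second in the future.
		 */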
	} else
		d->prg_cpu[idx]->begin.status = 0;
	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
				packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
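
/*
 * Editorial note: when insert_packet() links a new program into the
 * chain, the Z value or'ed into the branch address (0x2 vs 0x3 above)
 * tells the controller how many 16-byte descriptors the next program
 * occupies: an immediate-only quadlet transmit counts as two, and a
 * header-plus-data block transmit as three.
 */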
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing was inserted: either no free prgs or no pending packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}
/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use. However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR,
				      "legacy IT context cannot be initialized during interrupt");
				return -EINVAL;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}
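
/*
 * Editorial note: in IEEE 1394, transaction codes with bit 1 set
 * (2, 6, 7, 0xB) are responses and the rest are requests, which is
 * what the (packet->tcode & 0x02) test above relies on;
 * TCODE_ISO_DATA (0xA) also has that bit set, hence the explicit
 * exclusion routing async stream packets to the request context.
 */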
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	case ISO_LISTEN_CHANNEL:
	{
		u64 mask;
		struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
		int ir_legacy_active;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: ISO listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR,
			      "%s: ISO listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ir_legacy_active = ohci->ir_legacy_channels;

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);

		if (!ir_legacy_active) {
			if (ohci1394_register_iso_tasklet(ohci,
					  &ohci->ir_legacy_tasklet) < 0) {
				PRINT(KERN_ERR, "No IR DMA context available");
				return -EBUSY;
			}

			/* the IR context can be assigned to any DMA context
			 * by ohci1394_register_iso_tasklet */
			d->ctx = ohci->ir_legacy_tasklet.context;
			d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
				32*d->ctx;
			d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
				32*d->ctx;
			d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
			d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;

			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);

			if (printk_ratelimit())
				DBGMSG("IR legacy activated");
		}

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening enabled on channel %d", arg);
		break;
	}
	case ISO_UNLISTEN_CHANNEL:
	{
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: ISO unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR,
			      "%s: ISO unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening disabled on channel %d", arg);

		if (ohci->ir_legacy_channels == 0) {
			stop_dma_rcv_ctx(&ohci->ir_legacy_context);
			DBGMSG("ISO legacy receive context stopped");
		}

		break;
	}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}
/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor). Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block. But
  the user API expects all packets to be contiguous. Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/
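
/*
  Editorial sketch of the buffer-fill layout described above
  (N = nblocks, one page per block, plus the reserved guard page):

    | block 0 | block 1 | ... | block N-1 | guard page |

  A packet whose tail runs past the end of block N-1 wraps to block 0
  in DMA terms; the parser copies that tail into the guard page so the
  user always sees a contiguous packet.
*/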
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);

static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						((recv->nblocks+1)/iso->buf_packets);
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		   recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				 sizeof(struct dma_cmd) * recv->nblocks,
				 recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
				                       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
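
/*
 * Editorial note: in packet-per-buffer mode the stride loop above
 * picks the smallest power of two that is >= the per-packet share of
 * the buffer.  For example (hypothetical numbers), a 64 KiB buffer
 * holding 100 packets gives max_packet_size = 655 and therefore
 * buf_stride = 1024.
 */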
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}
/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}
static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* ignore out-of-range requests (note: block == nblocks is
	   already out of range, hence >=) */
	if ((block < 0) || (block >= recv->nblocks))
		return;

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}
static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	/* release the memory where the packet was */
	recv->released_bytes += info->total_len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}
static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}
/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = recv->buf_stride;

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG("ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}
static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int count;
	int wake = 0;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
		} else if (event) {
			PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		/* sync our view of the buffer */
		dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;

			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len,
					recv->buf_stride, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = recv->buf_stride;

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}
static void ohci_iso_recv_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE)
		ohci_iso_recv_bufferfill_task(iso, recv);
	else
		ohci_iso_recv_packetperbuf_task(iso, recv);
}
/***********************************
 * rawiso ISO transmission         *
 ***********************************/

struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;
	struct ohci1394_iso_tasklet task;
	int task_active;

	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];
	u32 unused[2];
	struct dma_cmd output_last;
};
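
/*
 * Editorial note: an OUTPUT_MORE_IMMEDIATE descriptor occupies two
 * 16-byte descriptor slots, the second holding the inlined iso header
 * and padding, so each iso_xmit_cmd above is 48 bytes and a queued
 * program is referenced with Z=3.
 */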
static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
1963 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1965 struct ohci_iso_xmit *xmit = iso->hostdata;
1966 struct ti_ohci *ohci = xmit->ohci;
1968 /* disable interrupts */
1969 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1971 /* halt DMA */
1972 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1973 /* XXX the DMA context will lock up if you try to send too much data! */
1974 PRINT(KERN_ERR,
1975 "you probably exceeded the OHCI card's bandwidth limit - "
1976 "reload the module and reduce xmit bandwidth");
1980 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1982 struct ohci_iso_xmit *xmit = iso->hostdata;
1984 if (xmit->task_active) {
1985 ohci_iso_xmit_stop(iso);
1986 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1987 xmit->task_active = 0;
1990 dma_prog_region_free(&xmit->prog);
1991 kfree(xmit);
1992 iso->hostdata = NULL;
1995 static void ohci_iso_xmit_task(unsigned long data)
1997 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1998 struct ohci_iso_xmit *xmit = iso->hostdata;
1999 struct ti_ohci *ohci = xmit->ohci;
2000 int wake = 0;
2001 int count;
2003 /* check the whole buffer if necessary, starting at pkt_dma */
2004 for (count = 0; count < iso->buf_packets; count++) {
2005 int cycle;
2007 /* DMA descriptor */
2008 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
2010 /* check for new writes to xferStatus */
2011 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
2012 u8 event = xferstatus & 0x1F;
2014 if (!event) {
2015 /* packet hasn't been sent yet; we are done for now */
2016 break;
2019 if (event != 0x11)
2020 PRINT(KERN_ERR,
2021 "IT DMA error - OHCI error code 0x%02x\n", event);
2023 /* at least one packet went out, so wake up the writer */
2024 wake = 1;
2026 /* parse cycle */
2027 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
2029 /* tell the subsystem the packet has gone out */
2030 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
2032 /* reset the DMA descriptor for next time */
2033 cmd->output_last.status = 0;
2036 if (wake)
2037 hpsb_iso_wake(iso);
2040 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
2042 struct ohci_iso_xmit *xmit = iso->hostdata;
2043 struct ti_ohci *ohci = xmit->ohci;
2045 int next_i, prev_i;
2046 struct iso_xmit_cmd *next, *prev;
2048 unsigned int offset;
2049 unsigned short len;
2050 unsigned char tag, sy;
2052 /* check that the packet doesn't cross a page boundary
2053 (we could allow this if we added OUTPUT_MORE descriptor support) */
2054 if (cross_bound(info->offset, info->len)) {
2055 PRINT(KERN_ERR,
2056 "rawiso xmit: packet %u crosses a page boundary",
2057 iso->first_packet);
2058 return -EINVAL;
2061 offset = info->offset;
2062 len = info->len;
2063 tag = info->tag;
2064 sy = info->sy;
2066 /* sync up the card's view of the buffer */
2067 dma_region_sync_for_device(&iso->data_buf, offset, len);
2069 /* append first_packet to the DMA chain */
2070 /* by linking the previous descriptor to it */
2071 /* (next will become the new end of the DMA chain) */
2073 next_i = iso->first_packet;
2074 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
2076 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
2077 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
2079 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
2080 memset(next, 0, sizeof(struct iso_xmit_cmd));
2081 next->output_more_immediate.control = cpu_to_le32(0x02000008);
2083 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2085 /* tcode = 0xA, and sy */
2086 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2088 /* tag and channel number */
2089 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2091 /* transmission speed */
2092 next->iso_hdr[2] = iso->speed & 0x7;
2094 /* payload size */
2095 next->iso_hdr[6] = len & 0xFF;
2096 next->iso_hdr[7] = len >> 8;
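/* A worked example of the header assembled above, assuming channel 5,
 * tag 1, sy 0, speed S400 (2) and a 512-byte payload: iso_hdr[] becomes
 * { 0xA0, 0x45, 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 } - tcode 0xA in the
 * high nibble of byte 0, tag and channel packed into byte 1, and the
 * length 0x0200 stored little-endian in bytes 6 and 7. */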
2098 /* set up the OUTPUT_LAST */
2099 next->output_last.control = cpu_to_le32(1 << 28);
2100 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2101 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2102 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2103 next->output_last.control |= cpu_to_le32(len);
2105 /* payload bus address */
2106 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2108 /* leave branchAddress at zero for now */
2110 /* re-write the previous DMA descriptor to chain to this one */
2112 /* set prev branch address to point to next (Z=3) */
2113 prev->output_last.branchAddress = cpu_to_le32(
2114 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
2116 /* disable interrupt, unless required by the IRQ interval */
2117 if (prev_i % iso->irq_interval) {
2118 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2119 } else {
2120 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
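/* E.g. with iso->irq_interval == 4, only descriptors 0, 4, 8, ... keep
 * their "want interrupt" bits set, so the completion interrupt (and
 * hence ohci_iso_xmit_task) fires at most once per four packets rather
 * than once per packet. */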
2123 wmb();
2125 /* wake DMA in case it is sleeping */
2126 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2128 /* issue a dummy read of the cycle timer to force all PCI
2129 writes to be posted immediately */
2130 mb();
2131 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
2133 return 0;
2136 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2138 struct ohci_iso_xmit *xmit = iso->hostdata;
2139 struct ti_ohci *ohci = xmit->ohci;
2141 /* clear out the control register */
2142 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2143 wmb();
2145 /* address and length of first descriptor block (Z=3) */
2146 reg_write(xmit->ohci, xmit->CommandPtr,
2147 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2149 /* cycle match */
2150 if (cycle != -1) {
2151 u32 start = cycle & 0x1FFF;
2153 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2154 just snarf them from the current time */
2155 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2157 /* advance one second to give some extra time for DMA to start */
2158 seconds += 1;
2160 start |= (seconds & 3) << 13;
2162 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
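/* A worked example: with cycle == 1000 while the cycle timer is in
 * second 7, seconds becomes 8, (8 & 3) == 0 and start == 1000, so the
 * register is written as 0x80000000 | (1000 << 16): cycleMatchEnable
 * plus a 15-bit match value whose bits 14-13 are the two low seconds
 * bits and whose bits 12-0 are the cycle number. */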
2165 /* enable interrupts */
2166 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2168 /* run */
2169 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2170 mb();
2172 /* wait 100 usec to give the card time to go active */
2173 udelay(100);
2175 /* check the RUN bit */
2176 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2177 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2178 reg_read(xmit->ohci, xmit->ContextControlSet));
2179 return -1;
2182 return 0;
2185 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2188 switch(cmd) {
2189 case XMIT_INIT:
2190 return ohci_iso_xmit_init(iso);
2191 case XMIT_START:
2192 return ohci_iso_xmit_start(iso, arg);
2193 case XMIT_STOP:
2194 ohci_iso_xmit_stop(iso);
2195 return 0;
2196 case XMIT_QUEUE:
2197 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2198 case XMIT_SHUTDOWN:
2199 ohci_iso_xmit_shutdown(iso);
2200 return 0;
2202 case RECV_INIT:
2203 return ohci_iso_recv_init(iso);
2204 case RECV_START: {
2205 int *args = (int*) arg;
2206 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2208 case RECV_STOP:
2209 ohci_iso_recv_stop(iso);
2210 return 0;
2211 case RECV_RELEASE:
2212 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2213 return 0;
2214 case RECV_FLUSH:
2215 ohci_iso_recv_task((unsigned long) iso);
2216 return 0;
2217 case RECV_SHUTDOWN:
2218 ohci_iso_recv_shutdown(iso);
2219 return 0;
2220 case RECV_LISTEN_CHANNEL:
2221 ohci_iso_recv_change_channel(iso, arg, 1);
2222 return 0;
2223 case RECV_UNLISTEN_CHANNEL:
2224 ohci_iso_recv_change_channel(iso, arg, 0);
2225 return 0;
2226 case RECV_SET_CHANNEL_MASK:
2227 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2228 return 0;
2230 default:
2231 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2232 cmd);
2233 break;
2235 return -EINVAL;
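/* This multiplexer is not called directly; the ieee1394 core reaches
 * it through the hpsb_host_driver hook registered below. Roughly (a
 * sketch, with made-up packet parameters):
 *
 *	struct hpsb_iso_packet_info info = {
 *		.offset = 0, .len = 512, .tag = 1, .sy = 0,
 *	};
 *	iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long) &info);
 *
 * with the command constants and argument encodings exactly as decoded
 * in the switch above. */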
2238 /***************************************
2239 * IEEE-1394 functionality section END *
2240 ***************************************/
2243 /********************************************************
2244 * Global stuff (interrupt handler, init/shutdown code) *
2245 ********************************************************/
2247 static void dma_trm_reset(struct dma_trm_ctx *d)
2249 unsigned long flags;
2250 LIST_HEAD(packet_list);
2251 struct ti_ohci *ohci = d->ohci;
2252 struct hpsb_packet *packet, *ptmp;
2254 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2256 /* Lock the context, reset it and release it. Move the packets
2257 * that were pending in the context to packet_list and free
2258 * them after releasing the lock. */
2260 spin_lock_irqsave(&d->lock, flags);
2262 list_splice(&d->fifo_list, &packet_list);
2263 list_splice(&d->pending_list, &packet_list);
2264 INIT_LIST_HEAD(&d->fifo_list);
2265 INIT_LIST_HEAD(&d->pending_list);
2267 d->branchAddrPtr = NULL;
2268 d->sent_ind = d->prg_ind;
2269 d->free_prgs = d->num_desc;
2271 spin_unlock_irqrestore(&d->lock, flags);
2273 if (list_empty(&packet_list))
2274 return;
2276 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2278 /* Now process subsystem callbacks for the packets from this
2279 * context. */
2280 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2281 list_del_init(&packet->driver_list);
2282 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2286 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2287 quadlet_t rx_event,
2288 quadlet_t tx_event)
2290 struct ohci1394_iso_tasklet *t;
2291 unsigned long mask;
2292 unsigned long flags;
2294 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2296 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2297 mask = 1 << t->context;
2299 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2300 tasklet_schedule(&t->tasklet);
2301 else if (rx_event & mask)
2302 tasklet_schedule(&t->tasklet);
2305 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2308 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2309 struct pt_regs *regs_are_unused)
2311 quadlet_t event, node_id;
2312 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2313 struct hpsb_host *host = ohci->host;
2314 int phyid = -1, isroot = 0;
2315 unsigned long flags;
2317 /* Read and clear the interrupt event register. Don't clear
2318 * the busReset event, though. This is done when we get the
2319 * selfIDComplete interrupt. */
2320 spin_lock_irqsave(&ohci->event_lock, flags);
2321 event = reg_read(ohci, OHCI1394_IntEventClear);
2322 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2323 spin_unlock_irqrestore(&ohci->event_lock, flags);
2325 if (!event)
2326 return IRQ_NONE;
2328 /* If event is ~(u32)0, the CardBus card was ejected. In this
2329 * case we just return and clean up in the ohci1394_pci_remove()
2330 * function. */
2331 if (event == ~(u32) 0) {
2332 DBGMSG("Device removed.");
2333 return IRQ_NONE;
2336 DBGMSG("IntEvent: %08x", event);
2338 if (event & OHCI1394_unrecoverableError) {
2339 int ctx;
2340 PRINT(KERN_ERR, "Unrecoverable error!");
2342 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2343 PRINT(KERN_ERR, "Async Req Tx Context died: "
2344 "ctrl[%08x] cmdptr[%08x]",
2345 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2346 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2348 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2349 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2350 "ctrl[%08x] cmdptr[%08x]",
2351 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2352 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2354 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2355 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2356 "ctrl[%08x] cmdptr[%08x]",
2357 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2358 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2360 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2361 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2362 "ctrl[%08x] cmdptr[%08x]",
2363 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2364 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2366 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2367 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2368 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2369 "ctrl[%08x] cmdptr[%08x]", ctx,
2370 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2371 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2374 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2375 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2376 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2377 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2378 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2379 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2380 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2383 event &= ~OHCI1394_unrecoverableError;
2385 if (event & OHCI1394_postedWriteErr) {
2386 PRINT(KERN_ERR, "physical posted write error");
2387 /* no recovery strategy yet; it would have to involve the protocol drivers */
2389 if (event & OHCI1394_cycleInconsistent) {
2390 /* We subscribe to the cycleInconsistent event only to
2391 * clear the corresponding event bit... otherwise,
2392 * isochronous cycleMatch DMA won't work. */
2393 DBGMSG("OHCI1394_cycleInconsistent");
2394 event &= ~OHCI1394_cycleInconsistent;
2396 if (event & OHCI1394_busReset) {
2397 /* The busReset event bit can't be cleared during the
2398 * selfID phase, so we disable busReset interrupts, to
2399 * avoid burying the cpu in interrupt requests. */
2400 spin_lock_irqsave(&ohci->event_lock, flags);
2401 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2403 if (ohci->check_busreset) {
2404 int loop_count = 0;
2406 udelay(10);
2408 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2409 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2411 spin_unlock_irqrestore(&ohci->event_lock, flags);
2412 udelay(10);
2413 spin_lock_irqsave(&ohci->event_lock, flags);
2415 /* The loop counter check is to prevent the driver
2416 * from remaining in this state forever. After the
2417 * initial bus reset the loop would otherwise run forever,
2418 * hanging the system until some device is manually
2419 * plugged into or unplugged from a port! The forced reset
2420 * seems to solve this problem. This mainly affects nForce2. */
2421 if (loop_count > 10000) {
2422 ohci_devctl(host, RESET_BUS, LONG_RESET);
2423 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2424 loop_count = 0;
2427 loop_count++;
2430 spin_unlock_irqrestore(&ohci->event_lock, flags);
2431 if (!host->in_bus_reset) {
2432 DBGMSG("irq_handler: Bus reset requested");
2434 /* Subsystem call */
2435 hpsb_bus_reset(ohci->host);
2437 event &= ~OHCI1394_busReset;
2439 if (event & OHCI1394_reqTxComplete) {
2440 struct dma_trm_ctx *d = &ohci->at_req_context;
2441 DBGMSG("Got reqTxComplete interrupt "
2442 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2443 if (reg_read(ohci, d->ctrlSet) & 0x800)
2444 ohci1394_stop_context(ohci, d->ctrlClear,
2445 "reqTxComplete");
2446 else
2447 dma_trm_tasklet((unsigned long)d);
2448 //tasklet_schedule(&d->task);
2449 event &= ~OHCI1394_reqTxComplete;
2451 if (event & OHCI1394_respTxComplete) {
2452 struct dma_trm_ctx *d = &ohci->at_resp_context;
2453 DBGMSG("Got respTxComplete interrupt "
2454 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2455 if (reg_read(ohci, d->ctrlSet) & 0x800)
2456 ohci1394_stop_context(ohci, d->ctrlClear,
2457 "respTxComplete");
2458 else
2459 tasklet_schedule(&d->task);
2460 event &= ~OHCI1394_respTxComplete;
2462 if (event & OHCI1394_RQPkt) {
2463 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2464 DBGMSG("Got RQPkt interrupt status=0x%08X",
2465 reg_read(ohci, d->ctrlSet));
2466 if (reg_read(ohci, d->ctrlSet) & 0x800)
2467 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2468 else
2469 tasklet_schedule(&d->task);
2470 event &= ~OHCI1394_RQPkt;
2472 if (event & OHCI1394_RSPkt) {
2473 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2474 DBGMSG("Got RSPkt interrupt status=0x%08X",
2475 reg_read(ohci, d->ctrlSet));
2476 if (reg_read(ohci, d->ctrlSet) & 0x800)
2477 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2478 else
2479 tasklet_schedule(&d->task);
2480 event &= ~OHCI1394_RSPkt;
2482 if (event & OHCI1394_isochRx) {
2483 quadlet_t rx_event;
2485 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2486 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2487 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2488 event &= ~OHCI1394_isochRx;
2490 if (event & OHCI1394_isochTx) {
2491 quadlet_t tx_event;
2493 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2494 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2495 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2496 event &= ~OHCI1394_isochTx;
2498 if (event & OHCI1394_selfIDComplete) {
2499 if (host->in_bus_reset) {
2500 node_id = reg_read(ohci, OHCI1394_NodeID);
2502 if (!(node_id & 0x80000000)) {
2503 PRINT(KERN_ERR,
2504 "SelfID received, but NodeID invalid "
2505 "(probably new bus reset occurred): %08X",
2506 node_id);
2507 goto selfid_not_valid;
2510 phyid = node_id & 0x0000003f;
2511 isroot = (node_id & 0x40000000) != 0;
2513 DBGMSG("SelfID interrupt received "
2514 "(phyid %d, %s)", phyid,
2515 (isroot ? "root" : "not root"));
2517 handle_selfid(ohci, host, phyid, isroot);
2519 /* Clear the bus reset event and re-enable the
2520 * busReset interrupt. */
2521 spin_lock_irqsave(&ohci->event_lock, flags);
2522 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2523 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2524 spin_unlock_irqrestore(&ohci->event_lock, flags);
2526 /* Turn on phys dma reception.
2528 * TODO: Enable some sort of filtering management.
2530 if (phys_dma) {
2531 reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
2532 0xffffffff);
2533 reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
2534 0xffffffff);
2537 DBGMSG("PhyReqFilter=%08x%08x",
2538 reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2539 reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
2541 hpsb_selfid_complete(host, phyid, isroot);
2542 } else
2543 PRINT(KERN_ERR,
2544 "SelfID received outside of bus reset sequence");
2546 selfid_not_valid:
2547 event &= ~OHCI1394_selfIDComplete;
2550 /* Make sure we handle everything, just in case we accidentally
2551 * enabled an interrupt that we didn't write a handler for. */
2552 if (event)
2553 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2554 event);
2556 return IRQ_HANDLED;
2559 /* Put the buffer back into the dma context */
2560 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2562 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2563 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2565 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2566 d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
2567 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2568 d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
2570 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2571 * context program descriptors before it sees the wakeup bit set. */
2572 wmb();
2574 /* wake up the dma context if necessary */
2575 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2576 PRINT(KERN_INFO,
2577 "Waking dma ctx=%d ... processing is probably too slow",
2578 d->ctx);
2581 /* do this always, to avoid a race condition */
2582 reg_write(ohci, d->ctrlSet, 0x1000);
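/* Recycling a descriptor thus consists of resetting its resCount
 * (status) to the full buffer size, clearing the Z bits of its own
 * branchAddress so the chain ends here, and setting Z=1 in the
 * *previous* descriptor's branchAddress so the controller can branch
 * into the re-inserted buffer. The 0x1000 write above sets the wake
 * bit, prompting a context that stopped at the end of the chain to
 * re-read its current descriptor. */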
2585 #define cond_le32_to_cpu(data, noswap) \
2586 (noswap ? data : le32_to_cpu(data))
2588 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2589 -1, 0, -1, 0, -1, -1, 16, -1};
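/* TCODE_SIZE gives, per transaction code, the number of bytes the
 * controller deposits in the AR buffer including the 4-byte status
 * trailer: a positive entry is the complete size (e.g. 20 for a quadlet
 * write request: 16-byte header plus trailer), 0 marks a block packet
 * whose size must be read from the data_length field (packet_length()
 * below adds data_length + 20), and -1 marks tcodes that are invalid
 * in an async receive context. */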
2592 * Determine the length of a packet in the buffer
2593 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2595 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2596 int offset, unsigned char tcode, int noswap)
2598 int length = -1;
2600 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2601 length = TCODE_SIZE[tcode];
2602 if (length == 0) {
2603 if (offset + 12 >= d->buf_size) {
2604 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2605 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2606 } else {
2607 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2609 length += 20;
2611 } else if (d->type == DMA_CTX_ISO) {
2612 /* Assumption: buffer fill mode with header/trailer */
2613 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2616 if (length > 0 && length % 4)
2617 length += 4 - (length % 4);
2619 return length;
2622 /* Tasklet that processes dma receive buffers */
2623 static void dma_rcv_tasklet (unsigned long data)
2625 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2626 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2627 unsigned int split_left, idx, offset, rescount;
2628 unsigned char tcode;
2629 int length, bytes_left, ack;
2630 unsigned long flags;
2631 quadlet_t *buf_ptr;
2632 char *split_ptr;
2633 char msg[256];
2635 spin_lock_irqsave(&d->lock, flags);
2637 idx = d->buf_ind;
2638 offset = d->buf_offset;
2639 buf_ptr = d->buf_cpu[idx] + offset/4;
2641 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2642 bytes_left = d->buf_size - rescount - offset;
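/* A worked example of the split-packet path below: with
 * buf_size == 4096, offset == 4000 and an incoming packet of
 * length == 200, the first 96 bytes are copied from the tail of
 * descriptor idx into d->spb, the descriptor is recycled via
 * insert_dma_buffer(), and the remaining 104 bytes are copied from the
 * start of descriptor idx+1, leaving offset == 104 for the next
 * iteration. */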
2644 while (bytes_left > 0) {
2645 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2647 /* packet_length() will return < 4 for an error */
2648 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2650 if (length < 4) { /* something is wrong */
2651 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2652 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2653 d->ctx, length);
2654 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2655 spin_unlock_irqrestore(&d->lock, flags);
2656 return;
2659 /* The first case is where we have a packet that crosses
2660 * over more than one descriptor. The next case is where
2661 * it's all in the first descriptor. */
2662 if ((offset + length) > d->buf_size) {
2663 DBGMSG("Split packet rcv'd");
2664 if (length > d->split_buf_size) {
2665 ohci1394_stop_context(ohci, d->ctrlClear,
2666 "Split packet size exceeded");
2667 d->buf_ind = idx;
2668 d->buf_offset = offset;
2669 spin_unlock_irqrestore(&d->lock, flags);
2670 return;
2673 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2674 == d->buf_size) {
2675 /* The other part of the packet has not been
2676 * written yet. This should never happen; in any
2677 * case we will pick it up on the next call. */
2678 PRINT(KERN_INFO,
2679 "Got only half a packet!");
2680 d->buf_ind = idx;
2681 d->buf_offset = offset;
2682 spin_unlock_irqrestore(&d->lock, flags);
2683 return;
2686 split_left = length;
2687 split_ptr = (char *)d->spb;
2688 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2689 split_left -= d->buf_size-offset;
2690 split_ptr += d->buf_size-offset;
2691 insert_dma_buffer(d, idx);
2692 idx = (idx+1) % d->num_desc;
2693 buf_ptr = d->buf_cpu[idx];
2694 offset=0;
2696 while (split_left >= d->buf_size) {
2697 memcpy(split_ptr,buf_ptr,d->buf_size);
2698 split_ptr += d->buf_size;
2699 split_left -= d->buf_size;
2700 insert_dma_buffer(d, idx);
2701 idx = (idx+1) % d->num_desc;
2702 buf_ptr = d->buf_cpu[idx];
2705 if (split_left > 0) {
2706 memcpy(split_ptr, buf_ptr, split_left);
2707 offset = split_left;
2708 buf_ptr += offset/4;
2710 } else {
2711 DBGMSG("Single packet rcv'd");
2712 memcpy(d->spb, buf_ptr, length);
2713 offset += length;
2714 buf_ptr += length/4;
2715 if (offset==d->buf_size) {
2716 insert_dma_buffer(d, idx);
2717 idx = (idx+1) % d->num_desc;
2718 buf_ptr = d->buf_cpu[idx];
2719 offset=0;
2723 /* We get one phy packet in the async request descriptor
2724 * for each bus reset; we always ignore it. */
2725 if (tcode != OHCI1394_TCODE_PHY) {
2726 if (!ohci->no_swap_incoming)
2727 packet_swab(d->spb, tcode);
2728 DBGMSG("Packet received from node"
2729 " %d ack=0x%02X spd=%d tcode=0x%X"
2730 " length=%d ctx=%d tlabel=%d",
2731 (d->spb[1]>>16)&0x3f,
2732 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2733 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2734 tcode, length, d->ctx,
2735 (cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);
2737 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2738 == 0x11) ? 1 : 0;
2740 hpsb_packet_received(ohci->host, d->spb,
2741 length-4, ack);
2743 #ifdef OHCI1394_DEBUG
2744 else
2745 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2746 d->ctx);
2747 #endif
2749 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2751 bytes_left = d->buf_size - rescount - offset;
2755 d->buf_ind = idx;
2756 d->buf_offset = offset;
2758 spin_unlock_irqrestore(&d->lock, flags);
2761 /* Bottom half that processes sent packets */
2762 static void dma_trm_tasklet (unsigned long data)
2764 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2765 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2766 struct hpsb_packet *packet, *ptmp;
2767 unsigned long flags;
2768 u32 status, ack;
2769 size_t datasize;
2771 spin_lock_irqsave(&d->lock, flags);
2773 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2774 datasize = packet->data_size;
2775 if (datasize && packet->type != hpsb_raw)
2776 status = le32_to_cpu(
2777 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2778 else
2779 status = le32_to_cpu(
2780 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2782 if (status == 0)
2783 /* this packet hasn't been sent yet */
2784 break;
2786 #ifdef OHCI1394_DEBUG
2787 if (datasize)
2788 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2789 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2790 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2791 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2792 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2793 status&0x1f, (status>>5)&0x3,
2794 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2795 d->ctx);
2796 else
2797 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2798 "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2799 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2800 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2801 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2802 status&0x1f, (status>>5)&0x3,
2803 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2804 d->ctx);
2805 else
2806 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2807 "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2808 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2809 >>16)&0x3f,
2810 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2811 >>4)&0xf,
2812 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2813 >>10)&0x3f,
2814 status&0x1f, (status>>5)&0x3,
2815 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2816 d->ctx);
2817 #endif
2819 if (status & 0x10) {
2820 ack = status & 0xf;
2821 } else {
2822 switch (status & 0x1f) {
2823 case EVT_NO_STATUS: /* that should never happen */
2824 case EVT_RESERVED_A: /* that should never happen */
2825 case EVT_LONG_PACKET: /* that should never happen */
2826 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2827 ack = ACKX_SEND_ERROR;
2828 break;
2829 case EVT_MISSING_ACK:
2830 ack = ACKX_TIMEOUT;
2831 break;
2832 case EVT_UNDERRUN:
2833 ack = ACKX_SEND_ERROR;
2834 break;
2835 case EVT_OVERRUN: /* that should never happen */
2836 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2837 ack = ACKX_SEND_ERROR;
2838 break;
2839 case EVT_DESCRIPTOR_READ:
2840 case EVT_DATA_READ:
2841 case EVT_DATA_WRITE:
2842 ack = ACKX_SEND_ERROR;
2843 break;
2844 case EVT_BUS_RESET: /* that should never happen */
2845 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2846 ack = ACKX_SEND_ERROR;
2847 break;
2848 case EVT_TIMEOUT:
2849 ack = ACKX_TIMEOUT;
2850 break;
2851 case EVT_TCODE_ERR:
2852 ack = ACKX_SEND_ERROR;
2853 break;
2854 case EVT_RESERVED_B: /* that should never happen */
2855 case EVT_RESERVED_C: /* that should never happen */
2856 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2857 ack = ACKX_SEND_ERROR;
2858 break;
2859 case EVT_UNKNOWN:
2860 case EVT_FLUSHED:
2861 ack = ACKX_SEND_ERROR;
2862 break;
2863 default:
2864 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2865 ack = ACKX_SEND_ERROR;
2866 BUG();
2870 list_del_init(&packet->driver_list);
2871 hpsb_packet_sent(ohci->host, packet, ack);
2873 if (datasize) {
2874 pci_unmap_single(ohci->dev,
2875 le32_to_cpu(d->prg_cpu[d->sent_ind]->end.address),
2876 datasize, PCI_DMA_TODEVICE);
2877 OHCI_DMA_FREE("single Xmit data packet");
2880 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2881 d->free_prgs++;
2884 dma_trm_flush(ohci, d);
2886 spin_unlock_irqrestore(&d->lock, flags);
2889 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2891 if (d->ctrlClear) {
2892 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2894 if (d->type == DMA_CTX_ISO) {
2895 /* disable interrupts */
2896 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2897 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2898 } else {
2899 tasklet_kill(&d->task);
2905 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2907 int i;
2908 struct ti_ohci *ohci = d->ohci;
2910 if (ohci == NULL)
2911 return;
2913 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2915 if (d->buf_cpu) {
2916 for (i=0; i<d->num_desc; i++)
2917 if (d->buf_cpu[i] && d->buf_bus[i]) {
2918 pci_free_consistent(
2919 ohci->dev, d->buf_size,
2920 d->buf_cpu[i], d->buf_bus[i]);
2921 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2923 kfree(d->buf_cpu);
2924 kfree(d->buf_bus);
2926 if (d->prg_cpu) {
2927 for (i=0; i<d->num_desc; i++)
2928 if (d->prg_cpu[i] && d->prg_bus[i]) {
2929 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2930 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2932 pci_pool_destroy(d->prg_pool);
2933 OHCI_DMA_FREE("dma_rcv prg pool");
2934 kfree(d->prg_cpu);
2935 kfree(d->prg_bus);
2937 kfree(d->spb);
2939 /* Mark this context as freed. */
2940 d->ohci = NULL;
2943 static int
2944 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2945 enum context_type type, int ctx, int num_desc,
2946 int buf_size, int split_buf_size, int context_base)
2948 int i, len;
2949 static int num_allocs;
2950 static char pool_name[20];
2952 d->ohci = ohci;
2953 d->type = type;
2954 d->ctx = ctx;
2956 d->num_desc = num_desc;
2957 d->buf_size = buf_size;
2958 d->split_buf_size = split_buf_size;
2960 d->ctrlSet = 0;
2961 d->ctrlClear = 0;
2962 d->cmdPtr = 0;
2964 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2965 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2967 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2968 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2969 free_dma_rcv_ctx(d);
2970 return -ENOMEM;
2973 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2974 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2976 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2977 PRINT(KERN_ERR, "Failed to allocate dma prg");
2978 free_dma_rcv_ctx(d);
2979 return -ENOMEM;
2982 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2984 if (d->spb == NULL) {
2985 PRINT(KERN_ERR, "Failed to allocate split buffer");
2986 free_dma_rcv_ctx(d);
2987 return -ENOMEM;
2990 len = sprintf(pool_name, "ohci1394_rcv_prg");
2991 sprintf(pool_name+len, "%d", num_allocs);
2992 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2993 sizeof(struct dma_cmd), 4, 0);
2994 if (d->prg_pool == NULL)
2996 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2997 free_dma_rcv_ctx(d);
2998 return -ENOMEM;
3000 num_allocs++;
3002 OHCI_DMA_ALLOC("dma_rcv prg pool");
3004 for (i=0; i<d->num_desc; i++) {
3005 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3006 d->buf_size,
3007 d->buf_bus+i);
3008 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3010 if (d->buf_cpu[i] != NULL) {
3011 memset(d->buf_cpu[i], 0, d->buf_size);
3012 } else {
3013 PRINT(KERN_ERR,
3014 "Failed to allocate dma buffer");
3015 free_dma_rcv_ctx(d);
3016 return -ENOMEM;
3019 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3020 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3022 if (d->prg_cpu[i] != NULL) {
3023 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3024 } else {
3025 PRINT(KERN_ERR,
3026 "Failed to allocate dma prg");
3027 free_dma_rcv_ctx(d);
3028 return -ENOMEM;
3032 spin_lock_init(&d->lock);
3034 if (type == DMA_CTX_ISO) {
3035 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3036 OHCI_ISO_MULTICHANNEL_RECEIVE,
3037 dma_rcv_tasklet, (unsigned long) d);
3038 } else {
3039 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3040 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3041 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3043 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3046 return 0;
3049 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3051 int i;
3052 struct ti_ohci *ohci = d->ohci;
3054 if (ohci == NULL)
3055 return;
3057 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3059 if (d->prg_cpu) {
3060 for (i=0; i<d->num_desc; i++)
3061 if (d->prg_cpu[i] && d->prg_bus[i]) {
3062 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3063 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3065 pci_pool_destroy(d->prg_pool);
3066 OHCI_DMA_FREE("dma_trm prg pool");
3067 kfree(d->prg_cpu);
3068 kfree(d->prg_bus);
3071 /* Mark this context as freed. */
3072 d->ohci = NULL;
3075 static int
3076 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3077 enum context_type type, int ctx, int num_desc,
3078 int context_base)
3080 int i, len;
3081 static char pool_name[20];
3082 static int num_allocs=0;
3084 d->ohci = ohci;
3085 d->type = type;
3086 d->ctx = ctx;
3087 d->num_desc = num_desc;
3088 d->ctrlSet = 0;
3089 d->ctrlClear = 0;
3090 d->cmdPtr = 0;
3092 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3093 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3095 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3096 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3097 free_dma_trm_ctx(d);
3098 return -ENOMEM;
3101 len = sprintf(pool_name, "ohci1394_trm_prg");
3102 sprintf(pool_name+len, "%d", num_allocs);
3103 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3104 sizeof(struct at_dma_prg), 4, 0);
3105 if (d->prg_pool == NULL) {
3106 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3107 free_dma_trm_ctx(d);
3108 return -ENOMEM;
3110 num_allocs++;
3112 OHCI_DMA_ALLOC("dma_rcv prg pool");
3114 for (i = 0; i < d->num_desc; i++) {
3115 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3116 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3118 if (d->prg_cpu[i] != NULL) {
3119 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3120 } else {
3121 PRINT(KERN_ERR,
3122 "Failed to allocate at dma prg");
3123 free_dma_trm_ctx(d);
3124 return -ENOMEM;
3128 spin_lock_init(&d->lock);
3130 /* initialize tasklet */
3131 if (type == DMA_CTX_ISO) {
3132 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3133 dma_trm_tasklet, (unsigned long) d);
3134 if (ohci1394_register_iso_tasklet(ohci,
3135 &ohci->it_legacy_tasklet) < 0) {
3136 PRINT(KERN_ERR, "No IT DMA context available");
3137 free_dma_trm_ctx(d);
3138 return -EBUSY;
3141 /* IT can be assigned to any context by register_iso_tasklet */
3142 d->ctx = ohci->it_legacy_tasklet.context;
3143 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3144 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3145 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3146 } else {
3147 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3148 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3149 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3150 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3153 return 0;
3156 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3158 struct ti_ohci *ohci = host->hostdata;
3160 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3161 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3163 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3167 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3168 quadlet_t data, quadlet_t compare)
3170 struct ti_ohci *ohci = host->hostdata;
3171 int i;
3173 reg_write(ohci, OHCI1394_CSRData, data);
3174 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3175 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3177 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3178 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3179 break;
3181 mdelay(1);
3184 return reg_read(ohci, OHCI1394_CSRData);
3187 static struct hpsb_host_driver ohci1394_driver = {
3188 .owner = THIS_MODULE,
3189 .name = OHCI1394_DRIVER_NAME,
3190 .set_hw_config_rom = ohci_set_hw_config_rom,
3191 .transmit_packet = ohci_transmit,
3192 .devctl = ohci_devctl,
3193 .isoctl = ohci_isoctl,
3194 .hw_csr_reg = ohci_hw_csr_reg,
3197 /***********************************
3198 * PCI Driver Interface functions *
3199 ***********************************/
3201 #define FAIL(err, fmt, args...) \
3202 do { \
3203 PRINT_G(KERN_ERR, fmt , ## args); \
3204 ohci1394_pci_remove(dev); \
3205 return err; \
3206 } while (0)
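/* A note on the error-handling convention used throughout the probe
 * function: every successful allocation advances ohci->init_state, and
 * FAIL() delegates all cleanup to ohci1394_pci_remove(), whose switch
 * statement falls through from the current init_state and tears down
 * exactly the resources acquired so far. A failure after, say,
 * init_state = OHCI_INIT_HAVE_IOMAPPING therefore unwinds the
 * iomapping, the memory region and the host structure, in that order. */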
3208 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3209 const struct pci_device_id *ent)
3211 struct hpsb_host *host;
3212 struct ti_ohci *ohci; /* shortcut to currently handled device */
3213 unsigned long ohci_base;
3215 if (pci_enable_device(dev))
3216 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3217 pci_set_master(dev);
3219 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3220 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3222 ohci = host->hostdata;
3223 ohci->dev = dev;
3224 ohci->host = host;
3225 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3226 host->pdev = dev;
3227 pci_set_drvdata(dev, ohci);
3229 /* We don't want hardware swapping */
3230 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3232 /* Some oddball Apple controllers do not order the selfid
3233 * properly, so we make up for it here. */
3234 #ifndef __LITTLE_ENDIAN
3235 /* XXX: Need a better way to check this. I'm wondering if we can
3236 * read the values of the OHCI1394_PCI_HCI_Control and the
3237 * noByteSwapData registers to see if they were not cleared to
3238 * zero. Should this work? Obviously it's not defined what these
3239 * registers will read when they aren't supported. Bleh! */
3240 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3241 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3242 ohci->no_swap_incoming = 1;
3243 ohci->selfid_swap = 0;
3244 } else
3245 ohci->selfid_swap = 1;
3246 #endif
3249 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3250 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3251 #endif
3253 /* These chipsets require a bit of extra care when checking after
3254 * a busreset. */
3255 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3256 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3257 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3258 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3259 ohci->check_busreset = 1;
3261 /* We hardwire the MMIO length, since some CardBus adaptors
3262 * fail to report the right length. Anyway, the ohci spec
3263 * clearly says it's 2kb, so this shouldn't be a problem. */
3264 ohci_base = pci_resource_start(dev, 0);
3265 if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3266 PRINT(KERN_WARNING, "PCI resource length of %lx too small!",
3267 pci_resource_len(dev, 0));
3269 /* Seems PCMCIA handles this internally. Not sure why. Seems
3270 * pretty bogus to force a driver to special case this. */
3271 #ifndef PCMCIA
3272 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
3273 FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
3274 ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
3275 #endif
3276 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3278 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3279 if (ohci->registers == NULL)
3280 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3281 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3282 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3284 /* csr_config rom allocation */
3285 ohci->csr_config_rom_cpu =
3286 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3287 &ohci->csr_config_rom_bus);
3288 OHCI_DMA_ALLOC("consistent csr_config_rom");
3289 if (ohci->csr_config_rom_cpu == NULL)
3290 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3291 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3293 /* self-id dma buffer allocation */
3294 ohci->selfid_buf_cpu =
3295 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3296 &ohci->selfid_buf_bus);
3297 OHCI_DMA_ALLOC("consistent selfid_buf");
3299 if (ohci->selfid_buf_cpu == NULL)
3300 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3301 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3303 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3304 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3305 "8Kb boundary... may cause problems on some CXD3222 chip",
3306 ohci->selfid_buf_cpu);
3308 /* No self-id errors at startup */
3309 ohci->self_id_errors = 0;
3311 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3312 /* AR DMA request context allocation */
3313 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3314 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3315 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3316 OHCI1394_AsReqRcvContextBase) < 0)
3317 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3319 /* AR DMA response context allocation */
3320 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3321 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3322 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3323 OHCI1394_AsRspRcvContextBase) < 0)
3324 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3326 /* AT DMA request context */
3327 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3328 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3329 OHCI1394_AsReqTrContextBase) < 0)
3330 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3332 /* AT DMA response context */
3333 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3334 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3335 OHCI1394_AsRspTrContextBase) < 0)
3336 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3338 /* Start off with a soft reset, to clear everything to a sane
3339 * state. */
3340 ohci_soft_reset(ohci);
3342 /* Now enable LPS, which we need in order to start accessing
3343 * most of the registers. In fact, on some cards (ALI M5251),
3344 * accessing registers in the SClk domain without LPS enabled
3345 * will lock up the machine. Wait 50msec to make sure we have
3346 * full link enabled. */
3347 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3349 /* Disable and clear interrupts */
3350 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3351 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3353 mdelay(50);
3355 /* Determine the number of available IR and IT contexts. */
3356 ohci->nb_iso_rcv_ctx =
3357 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3358 ohci->nb_iso_xmit_ctx =
3359 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3361 /* Set the usage bits for non-existent contexts so they can't
3362 * be allocated */
3363 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3364 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
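/* E.g. a controller reporting 4 receive contexts yields
 * ir_ctx_usage = ~0 << 4, leaving only bits 0-3 clear, so only those
 * four contexts can ever be claimed by the test_and_set_bit() loop in
 * ohci1394_register_iso_tasklet() below. */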
3366 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3367 spin_lock_init(&ohci->iso_tasklet_list_lock);
3368 ohci->ISO_channel_usage = 0;
3369 spin_lock_init(&ohci->IR_channel_lock);
3371 /* Allocate the IR DMA context right here so we don't have
3372 * to do it in the interrupt path - note that this doesn't
3373 * waste much memory and avoids the juggling required to
3374 * allocate it in the IRQ path. */
3375 if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3376 DMA_CTX_ISO, 0, IR_NUM_DESC,
3377 IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3378 OHCI1394_IsoRcvContextBase) < 0) {
3379 FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3382 /* We hopefully don't have to pre-allocate IT DMA like we did
3383 * for IR DMA above. Allocate it on-demand and mark inactive. */
3384 ohci->it_legacy_context.ohci = NULL;
3385 spin_lock_init(&ohci->event_lock);
3388 * interrupts are disabled, all right, but... due to SA_SHIRQ we
3389 * might get called anyway. We'll see no event, of course, but
3390 * we need to get to that "no event", so enough should be initialized
3391 * by that point.
3393 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
3394 OHCI1394_DRIVER_NAME, ohci))
3395 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3397 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3398 ohci_initialize(ohci);
3400 /* Set certain csr values */
3401 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3402 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3403 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3404 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3405 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3407 /* Tell the highlevel this host is ready */
3408 if (hpsb_add_host(host))
3409 FAIL(-ENOMEM, "Failed to register host with highlevel");
3411 ohci->init_state = OHCI_INIT_DONE;
3413 return 0;
3414 #undef FAIL
3417 static void ohci1394_pci_remove(struct pci_dev *pdev)
3419 struct ti_ohci *ohci;
3420 struct device *dev;
3422 ohci = pci_get_drvdata(pdev);
3423 if (!ohci)
3424 return;
3426 dev = get_device(&ohci->host->device);
3428 switch (ohci->init_state) {
3429 case OHCI_INIT_DONE:
3430 hpsb_remove_host(ohci->host);
3432 /* Clear out BUS Options */
3433 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3434 reg_write(ohci, OHCI1394_BusOptions,
3435 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3436 0x00ff0000);
3437 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3439 case OHCI_INIT_HAVE_IRQ:
3440 /* Clear interrupt registers */
3441 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3442 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3443 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3444 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3445 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3446 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3448 /* Disable IRM Contender */
3449 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3451 /* Clear link control register */
3452 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3454 /* Let all other nodes know to ignore us */
3455 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3457 /* Soft reset before we start - this disables
3458 * interrupts and clears linkEnable and LPS. */
3459 ohci_soft_reset(ohci);
3460 free_irq(ohci->dev->irq, ohci);
3462 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3463 /* The ohci_soft_reset() above already stops all DMA
3464 * contexts, so we don't need to stop them here. */
3465 /* Free AR dma */
3466 free_dma_rcv_ctx(&ohci->ar_req_context);
3467 free_dma_rcv_ctx(&ohci->ar_resp_context);
3469 /* Free AT dma */
3470 free_dma_trm_ctx(&ohci->at_req_context);
3471 free_dma_trm_ctx(&ohci->at_resp_context);
3473 /* Free IR legacy dma */
3474 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3476 /* Free IT legacy dma */
3477 free_dma_trm_ctx(&ohci->it_legacy_context);
3483 case OHCI_INIT_HAVE_SELFID_BUFFER:
3484 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3485 ohci->selfid_buf_cpu,
3486 ohci->selfid_buf_bus);
3487 OHCI_DMA_FREE("consistent selfid_buf");
3489 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3490 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3491 ohci->csr_config_rom_cpu,
3492 ohci->csr_config_rom_bus);
3493 OHCI_DMA_FREE("consistent csr_config_rom");
3495 case OHCI_INIT_HAVE_IOMAPPING:
3496 iounmap(ohci->registers);
3498 case OHCI_INIT_HAVE_MEM_REGION:
3499 #ifndef PCMCIA
3500 release_mem_region(pci_resource_start(ohci->dev, 0),
3501 OHCI1394_REGISTER_SIZE);
3502 #endif
3504 #ifdef CONFIG_PPC_PMAC
3505 /* On UniNorth, power down the cable and turn off the chip
3506 * clock when the module is removed to save power on
3507 * laptops. Turning it back ON is done by the arch code when
3508 * pci_enable_device() is called */
3510 struct device_node* of_node;
3512 of_node = pci_device_to_OF_node(ohci->dev);
3513 if (of_node) {
3514 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3515 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3518 #endif /* CONFIG_PPC_PMAC */
3520 case OHCI_INIT_ALLOC_HOST:
3521 pci_set_drvdata(ohci->dev, NULL);
3524 if (dev)
3525 put_device(dev);
3529 static int ohci1394_pci_resume (struct pci_dev *pdev)
3531 #ifdef CONFIG_PPC_PMAC
3532 if (machine_is(powermac)) {
3533 struct device_node *of_node;
3535 /* Re-enable 1394 */
3536 of_node = pci_device_to_OF_node (pdev);
3537 if (of_node)
3538 pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
3540 #endif /* CONFIG_PPC_PMAC */
3542 pci_enable_device(pdev);
3544 return 0;
3548 static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3550 #ifdef CONFIG_PPC_PMAC
3551 if (machine_is(powermac)) {
3552 struct device_node *of_node;
3554 /* Disable 1394 */
3555 of_node = pci_device_to_OF_node (pdev);
3556 if (of_node)
3557 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3559 #endif
3561 return 0;
3565 #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
3567 static struct pci_device_id ohci1394_pci_tbl[] = {
3569 .class = PCI_CLASS_FIREWIRE_OHCI,
3570 .class_mask = PCI_ANY_ID,
3571 .vendor = PCI_ANY_ID,
3572 .device = PCI_ANY_ID,
3573 .subvendor = PCI_ANY_ID,
3574 .subdevice = PCI_ANY_ID,
3576 { 0, },
3579 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3581 static struct pci_driver ohci1394_pci_driver = {
3582 .name = OHCI1394_DRIVER_NAME,
3583 .id_table = ohci1394_pci_tbl,
3584 .probe = ohci1394_pci_probe,
3585 .remove = ohci1394_pci_remove,
3586 .resume = ohci1394_pci_resume,
3587 .suspend = ohci1394_pci_suspend,
3590 /***********************************
3591 * OHCI1394 Video Interface *
3592 ***********************************/
3594 /* essentially the only purpose of this code is to allow another
3595 module to hook into ohci's interrupt handler */
3597 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3599 int i=0;
3601 /* stop the channel program if it's still running */
3602 reg_write(ohci, reg, 0x8000);
3604 /* Wait until it effectively stops */
3605 while (reg_read(ohci, reg) & 0x400) {
3606 i++;
3607 if (i>5000) {
3608 PRINT(KERN_ERR,
3609 "Runaway loop while stopping context: %s...", msg ? msg : "");
3610 return 1;
3613 mb();
3614 udelay(10);
3616 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3617 return 0;
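/* A note on the timing above: writing 0x8000 to a ContextControlClear
 * register clears the run bit, after which the controller finishes its
 * current descriptor before dropping the active bit (0x400). That is
 * why the loop polls for up to 5000 iterations of 10 usec each -
 * roughly 50 ms - before declaring a runaway context. */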
3620 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3621 void (*func)(unsigned long), unsigned long data)
3623 tasklet_init(&tasklet->tasklet, func, data);
3624 tasklet->type = type;
3625 /* We init the tasklet->link field, so we can list_del() it
3626 * without worrying whether it was added to the list or not. */
3627 INIT_LIST_HEAD(&tasklet->link);
3630 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3631 struct ohci1394_iso_tasklet *tasklet)
3633 unsigned long flags, *usage;
3634 int n, i, r = -EBUSY;
3636 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3637 n = ohci->nb_iso_xmit_ctx;
3638 usage = &ohci->it_ctx_usage;
3640 else {
3641 n = ohci->nb_iso_rcv_ctx;
3642 usage = &ohci->ir_ctx_usage;
3644 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3645 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3646 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3647 return r;
3652 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3654 for (i = 0; i < n; i++)
3655 if (!test_and_set_bit(i, usage)) {
3656 tasklet->context = i;
3657 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3658 r = 0;
3659 break;
3662 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3664 return r;
3667 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3668 struct ohci1394_iso_tasklet *tasklet)
3670 unsigned long flags;
3672 tasklet_kill(&tasklet->tasklet);
3674 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3676 if (tasklet->type == OHCI_ISO_TRANSMIT)
3677 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3678 else {
3679 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3681 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3682 clear_bit(0, &ohci->ir_multichannel_used);
3686 list_del(&tasklet->link);
3688 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3691 EXPORT_SYMBOL(ohci1394_stop_context);
3692 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3693 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3694 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3696 /***********************************
3697 * General module initialization *
3698 ***********************************/
3700 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3701 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3702 MODULE_LICENSE("GPL");
3704 static void __exit ohci1394_cleanup (void)
3706 pci_unregister_driver(&ohci1394_pci_driver);
3709 static int __init ohci1394_init(void)
3711 return pci_register_driver(&ohci1394_pci_driver);
3714 module_init(ohci1394_init);
3715 module_exit(ohci1394_cleanup);