/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                        Gord Peters <GordPeters@smarttech.com>
 *              2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . Iso Receive
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Iso Transmit
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 */

/*
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionalities
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *  . Bug fixes
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "dma.h"
#include "iso.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

#ifdef DBGMSG
#undef DBGMSG
#endif

#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...) do {} while (0)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0444);
MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");

static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);

#ifndef __LITTLE_ENDIAN
static const size_t hdr_sizes[] = {
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* reserved */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
	/* rest is reserved or link-internal */
};

static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
{
	size_t size;

	if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
		return;

	size = hdr_sizes[tcode];
	while (size--)
		data[size] = le32_to_cpu(data[size]);
}
#else
#define header_le32_to_cpu(w,x) do {} while (0)
#endif /* !LITTLE_ENDIAN */

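/*
 * Illustrative sketch (never compiled, not part of the driver): on a
 * big-endian host a TCODE_READQ_RESPONSE header (tcode 6) has
 * hdr_sizes[6] == 3, so only the first three header quadlets are
 * byte-swapped and any trailing quadlets are left untouched.
 */
#if 0
static void header_swap_example(void)
{
	quadlet_t hdr[4] = { 0x01020304, 0x05060708, 0x090a0b0c, 0x0d0e0f10 };

	header_le32_to_cpu(hdr, 6);	/* swaps hdr[0..2], leaves hdr[3] alone */
}
#endif
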
/***********************************
 * IEEE-1394 functionality section *
 ***********************************/

static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}

static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return;
}

/* Or's our value into the current value */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
	u8 old;

	old = get_phy_reg (ohci, addr);
	old |= data;
	set_phy_reg (ohci, addr, old);

	return;
}

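/*
 * PHY register access protocol used above: a read is started by writing
 * (addr << 8) | 0x00008000 (rdReg) to PhyControl and polling bit 31
 * (rdDone); the value is then taken from bits 23-16.  A write sets bit 14
 * (wrReg) and polls until it clears.  Hedged usage example - this is how
 * handle_selfid() below forces another bus reset by OR-ing the IBR bit
 * into PHY register 1:
 *
 *	set_phy_reg_mask(ohci, 1, 0x40);
 */
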
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response.  */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}

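/*
 * On the wire every self-ID quadlet is followed by its bitwise complement,
 * which is what the q0 == ~q1 check above verifies.  Worked example
 * (illustrative values): q0 = 0x807f8000 is only accepted when
 * q1 == 0x7f807fff, and the sender's PHY ID is (q0 & 0x3f000000) >> 24,
 * here 0.
 */
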
static void ohci_soft_reset(struct ti_ohci *ohci) {
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
	DBGMSG ("Soft reset finished");
}

/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}

/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}

/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	return hweight32(tmp);
}

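/*
 * Discovery trick used above: writing all-ones to an iso IntMask register
 * only latches the bits the hardware implements, so reading it back yields
 * one set bit per available context.  E.g. a controller with four IR
 * contexts reads back 0x0000000f, and hweight32(0x0000000f) == 4.
 */
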
/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000;
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound,
		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d]  "
	      "MMIO=[%llx-%llx]  Max Packet=[%d]  IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
	      (unsigned long long)pci_resource_start(ohci->dev, 0),
	      (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to set max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}

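/*
 * Worked example for the max_packet_size computation above (illustrative):
 * BusOptions bits 15-12 hold the max_rec field and the payload limit is
 * 2^(max_rec + 1) bytes, so a typical max_rec of 8 gives
 * 1 << (8 + 1) = 512 bytes - also the fallback the EEPROM sanity check
 * programs when the reported value is outside [512, 4096].
 */
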
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}

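/*
 * Worked example of the response timeout stamp above (illustrative values):
 * with cycleTimer = 0x0c345678, the 3-bit seconds field is
 * (0x0c345678 >> 25) & 0x7 = 6, so the stamp carries (6 + 1) & 0x7 = 7 in
 * bits 15-13 and the 13-bit cycle count (cycleTimer & 0x01fff000) >> 12 in
 * bits 12-0 - i.e. the response expires roughly one second from now.
 */
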
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}

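/*
 * Note on the Z value written with the command pointer above (an
 * assumption drawn from the OHCI descriptor format, stated here for
 * clarity): Z counts the 16-byte descriptor units in the first block -
 * z = 3 for a block transmit (OUTPUT_MORE-immediate plus OUTPUT_LAST),
 * z = 2 for a quadlet transmit (a single OUTPUT_LAST-immediate), matching
 * the 0x3/0x2 tags OR'ed into the branch addresses in insert_packet().
 */
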
/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}

static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0, phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}

/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor). Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block. But
  the user API expects all packets to be contiguous. Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/

struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};

static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);

static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						((recv->nblocks+1)/iso->buf_packets);
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				  sizeof(struct dma_cmd) * recv->nblocks,
				  recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}

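/*
 * Worked example for the packet-per-buffer stride chosen above
 * (illustrative numbers): with iso->buf_size = 65536 and
 * iso->buf_packets = 64, max_packet_size is 1024 and the doubling loop
 * stops at buf_stride = 1024 - a power of two no larger than PAGE_SIZE,
 * so every packet slot stays inside a single page.
 */
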
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}

/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}

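/*
 * Worked example of the control word assembled above (illustrative,
 * assuming buffer-fill mode, a 4096-byte stride, and an interrupt-bearing
 * block):
 *
 *	(2 << 28) | (8 << 24) | (3 << 20) | (3 << 18) | 4096 = 0x283c1000
 *
 * i.e. an INPUT_MORE descriptor that updates xferStatus/resCount,
 * interrupts on completion, branches onward, and accepts up to 4096 bytes.
 */
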
/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}

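/*
 * Example contextMatch value (illustrative, not a required configuration):
 * starting reception on channel 5 with tag_mask 0xf and sync -1 at cycle
 * 100, when the current seconds field reads 2, gives seconds + 1 = 3 and
 *
 *	(0xf << 28) | ((((3 & 3) << 13) | 100) << 12) | 5 = 0xf6064005
 *
 * with tags in bits 31-28, the cycle match in bits 26-12, and the channel
 * in the low bits.
 */
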
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* ignore out-of-range requests */
	if ((block < 0) || (block > recv->nblocks))
		return;

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}

static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	/* release the memory where the packet was */
	recv->released_bytes += info->total_len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}

static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}

/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}

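/*
 * Worked example of the parsing above (illustrative): a 13-byte payload is
 * stored as a 4-byte ISO header, 13 payload bytes padded to 16 (quadlet
 * alignment), and a 4-byte xferStatus/timeStamp trailer, so
 * total_len = 13 + 8 + 3 = 24 bytes are consumed from the ring.
 */
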
static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = recv->buf_stride;

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG("ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}

static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int count;
	int wake = 0;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
		} else if (event) {
			PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		/* sync our view of the buffer */
		dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;

			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len,
					recv->buf_stride, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = recv->buf_stride;

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE)
		ohci_iso_recv_bufferfill_task(iso, recv);
	else
		ohci_iso_recv_packetperbuf_task(iso, recv);
}

/***********************************
 * rawiso ISO transmission         *
 ***********************************/

struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;
	struct ohci1394_iso_tasklet task;
	int task_active;
	int last_cycle;
	atomic_t skips;

	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};

/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];
	u32 unused[2];
	struct dma_cmd output_last;
};

static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);

static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;
	xmit->last_cycle = -1;
	atomic_set(&iso->skips, 0);

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}

static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		PRINT(KERN_ERR,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}

static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}

static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8  event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		if (xmit->last_cycle > -1) {
			int cycle_diff = cycle - xmit->last_cycle;
			int skip;

			/* unwrap */
			if (cycle_diff < 0) {
				cycle_diff += 8000;
				if (cycle_diff < 0)
					PRINT(KERN_ERR, "bogus cycle diff %d\n",
					      cycle_diff);
			}

			skip = cycle_diff - 1;
			if (skip > 0) {
				DBGMSG("skipped %d cycles without packet loss", skip);
				atomic_add(skip, &iso->skips);
			}
		}
		xmit->last_cycle = cycle;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}

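/*
 * Worked example of the cycle unwrap above (illustrative): if the last
 * packet left at cycle 7998 and this one at cycle 2, cycle_diff is
 * 2 - 7998 + 8000 = 4, so skip = 3 empty cycles are added to iso->skips.
 */
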
1864 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1866 struct ohci_iso_xmit *xmit = iso->hostdata;
1867 struct ti_ohci *ohci = xmit->ohci;
1869 int next_i, prev_i;
1870 struct iso_xmit_cmd *next, *prev;
1872 unsigned int offset;
1873 unsigned short len;
1874 unsigned char tag, sy;
1876 /* check that the packet doesn't cross a page boundary
1877 (we could allow this if we added OUTPUT_MORE descriptor support) */
1878 if (cross_bound(info->offset, info->len)) {
1879 PRINT(KERN_ERR,
1880 "rawiso xmit: packet %u crosses a page boundary",
1881 iso->first_packet);
1882 return -EINVAL;
1885 offset = info->offset;
1886 len = info->len;
1887 tag = info->tag;
1888 sy = info->sy;
1890 /* sync up the card's view of the buffer */
1891 dma_region_sync_for_device(&iso->data_buf, offset, len);
1893 /* append first_packet to the DMA chain */
1894 /* by linking the previous descriptor to it */
1895 /* (next will become the new end of the DMA chain) */
1897 next_i = iso->first_packet;
1898 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
1900 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
1901 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
1903 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
1904 memset(next, 0, sizeof(struct iso_xmit_cmd));
1905 next->output_more_immediate.control = cpu_to_le32(0x02000008);
1907 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
1909 /* tcode = 0xA, and sy */
1910 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
1912 /* tag and channel number */
1913 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
1915 /* transmission speed */
1916 next->iso_hdr[2] = iso->speed & 0x7;
1918 /* payload size */
1919 next->iso_hdr[6] = len & 0xFF;
1920 next->iso_hdr[7] = len >> 8;
1922 /* set up the OUTPUT_LAST */
1923 next->output_last.control = cpu_to_le32(1 << 28);
1924 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
1925 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
1926 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
1927 next->output_last.control |= cpu_to_le32(len);
1929 /* payload bus address */
1930 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
1932 /* leave branchAddress at zero for now */
1934 /* re-write the previous DMA descriptor to chain to this one */
1936 /* set prev branch address to point to next (Z=3) */
1937 prev->output_last.branchAddress = cpu_to_le32(
1938 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
1940 /*
1941 * Link the skip address to this descriptor itself. This causes a
1942 * context to skip a cycle whenever lost cycles or FIFO overruns occur,
1943 * without dropping the data. At that point the application should
1944 * decide whether this is an error condition or not; some protocols
1945 * can deal with it by dropping rate-matching padding packets.
1946 */
1947 next->output_more_immediate.branchAddress =
1948 prev->output_last.branchAddress;
1950 /* disable interrupt, unless required by the IRQ interval */
1951 if (prev_i % iso->irq_interval) {
1952 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
1953 } else {
1954 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
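/*
 * Illustration (numbers assumed): with iso->irq_interval == 4, only
 * descriptors 0, 4, 8, ... keep bits (3 << 20) set, so a completion
 * interrupt fires once per interval instead of once per packet; the
 * tasklet then reaps all packets completed in between.
 */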
1957 wmb();
1959 /* wake DMA in case it is sleeping */
1960 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
1962 /* issue a dummy read of the cycle timer to force all PCI
1963 writes to be posted immediately */
1964 mb();
1965 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
1967 return 0;
1970 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
1972 struct ohci_iso_xmit *xmit = iso->hostdata;
1973 struct ti_ohci *ohci = xmit->ohci;
1975 /* clear out the control register */
1976 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
1977 wmb();
1979 /* address and length of first descriptor block (Z=3) */
1980 reg_write(xmit->ohci, xmit->CommandPtr,
1981 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
1983 /* cycle match */
1984 if (cycle != -1) {
1985 u32 start = cycle & 0x1FFF;
1987 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1988 just snarf them from the current time */
1989 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1991 /* advance one second to give some extra time for DMA to start */
1992 seconds += 1;
1994 start |= (seconds & 3) << 13;
1996 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
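/*
 * Illustrative encoding (values assumed): with cycle == 1000 and the
 * timer's cycleSeconds reading 5, start = 1000 | (((5 + 1) & 3) << 13),
 * and the write sets cycleMatchEnable (bit 31) with the 15-bit match
 * value in bits 30..16, so DMA begins exactly on that cycle.
 */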
1999 /* enable interrupts */
2000 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2002 /* run */
2003 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2004 mb();
2006 /* wait 100 usec to give the card time to go active */
2007 udelay(100);
2009 /* check the RUN bit */
2010 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2011 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2012 reg_read(xmit->ohci, xmit->ContextControlSet));
2013 return -1;
2016 return 0;
2019 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2022 switch(cmd) {
2023 case XMIT_INIT:
2024 return ohci_iso_xmit_init(iso);
2025 case XMIT_START:
2026 return ohci_iso_xmit_start(iso, arg);
2027 case XMIT_STOP:
2028 ohci_iso_xmit_stop(iso);
2029 return 0;
2030 case XMIT_QUEUE:
2031 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2032 case XMIT_SHUTDOWN:
2033 ohci_iso_xmit_shutdown(iso);
2034 return 0;
2036 case RECV_INIT:
2037 return ohci_iso_recv_init(iso);
2038 case RECV_START: {
2039 int *args = (int*) arg;
2040 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2042 case RECV_STOP:
2043 ohci_iso_recv_stop(iso);
2044 return 0;
2045 case RECV_RELEASE:
2046 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2047 return 0;
2048 case RECV_FLUSH:
2049 ohci_iso_recv_task((unsigned long) iso);
2050 return 0;
2051 case RECV_SHUTDOWN:
2052 ohci_iso_recv_shutdown(iso);
2053 return 0;
2054 case RECV_LISTEN_CHANNEL:
2055 ohci_iso_recv_change_channel(iso, arg, 1);
2056 return 0;
2057 case RECV_UNLISTEN_CHANNEL:
2058 ohci_iso_recv_change_channel(iso, arg, 0);
2059 return 0;
2060 case RECV_SET_CHANNEL_MASK:
2061 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2062 return 0;
2064 default:
2065 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2066 cmd);
2067 break;
2069 return -EINVAL;
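/*
 * Hedged usage sketch (caller context assumed, not part of this
 * file): the hpsb_iso layer reaches this dispatcher through
 * host->driver->isoctl. Queueing one packet might look like:
 *
 *	struct hpsb_iso_packet_info info = {
 *		.offset = 0, .len = 64, .tag = 0, .sy = 0,
 *	};
 *	ret = ohci_isoctl(iso, XMIT_QUEUE, (unsigned long)&info);
 */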
2072 /***************************************
2073 * IEEE-1394 functionality section END *
2074 ***************************************/
2077 /********************************************************
2078 * Global stuff (interrupt handler, init/shutdown code) *
2079 ********************************************************/
2081 static void dma_trm_reset(struct dma_trm_ctx *d)
2083 unsigned long flags;
2084 LIST_HEAD(packet_list);
2085 struct ti_ohci *ohci = d->ohci;
2086 struct hpsb_packet *packet, *ptmp;
2088 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2090 /* Lock the context, reset it and release it. Move the packets
2091 * that were pending in the context to packet_list and free
2092 * them after releasing the lock. */
2094 spin_lock_irqsave(&d->lock, flags);
2096 list_splice_init(&d->fifo_list, &packet_list);
2097 list_splice_init(&d->pending_list, &packet_list);
2099 d->branchAddrPtr = NULL;
2100 d->sent_ind = d->prg_ind;
2101 d->free_prgs = d->num_desc;
2103 spin_unlock_irqrestore(&d->lock, flags);
2105 if (list_empty(&packet_list))
2106 return;
2108 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2110 /* Now process subsystem callbacks for the packets from this
2111 * context. */
2112 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2113 list_del_init(&packet->driver_list);
2114 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2118 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2119 quadlet_t rx_event,
2120 quadlet_t tx_event)
2122 struct ohci1394_iso_tasklet *t;
2123 unsigned long mask;
2124 unsigned long flags;
2126 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2128 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2129 mask = 1 << t->context;
2131 if (t->type == OHCI_ISO_TRANSMIT) {
2132 if (tx_event & mask)
2133 tasklet_schedule(&t->tasklet);
2134 } else {
2135 /* OHCI_ISO_RECEIVE or OHCI_ISO_MULTICHANNEL_RECEIVE */
2136 if (rx_event & mask)
2137 tasklet_schedule(&t->tasklet);
2141 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2144 static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
2146 quadlet_t event, node_id;
2147 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2148 struct hpsb_host *host = ohci->host;
2149 int phyid = -1, isroot = 0;
2150 unsigned long flags;
2152 /* Read and clear the interrupt event register. Don't clear
2153 * the busReset event, though. This is done when we get the
2154 * selfIDComplete interrupt. */
2155 spin_lock_irqsave(&ohci->event_lock, flags);
2156 event = reg_read(ohci, OHCI1394_IntEventClear);
2157 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2158 spin_unlock_irqrestore(&ohci->event_lock, flags);
2160 if (!event)
2161 return IRQ_NONE;
2163 /* If event is ~(u32)0, the CardBus card was ejected. In this
2164 * case we just return and clean up in the ohci1394_pci_remove()
2165 * function. */
2166 if (event == ~(u32) 0) {
2167 DBGMSG("Device removed.");
2168 return IRQ_NONE;
2171 DBGMSG("IntEvent: %08x", event);
2173 if (event & OHCI1394_unrecoverableError) {
2174 int ctx;
2175 PRINT(KERN_ERR, "Unrecoverable error!");
2177 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2178 PRINT(KERN_ERR, "Async Req Tx Context died: "
2179 "ctrl[%08x] cmdptr[%08x]",
2180 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2181 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2183 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2184 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2185 "ctrl[%08x] cmdptr[%08x]",
2186 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2187 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2189 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2190 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2191 "ctrl[%08x] cmdptr[%08x]",
2192 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2193 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2195 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2196 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2197 "ctrl[%08x] cmdptr[%08x]",
2198 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2199 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2201 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2202 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2203 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2204 "ctrl[%08x] cmdptr[%08x]", ctx,
2205 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2206 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2209 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2210 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2211 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2212 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2213 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2214 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2215 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2218 event &= ~OHCI1394_unrecoverableError;
2220 if (event & OHCI1394_postedWriteErr) {
2221 PRINT(KERN_ERR, "physical posted write error");
2222 /* no recovery strategy yet; we would have to involve protocol drivers */
2223 event &= ~OHCI1394_postedWriteErr;
2225 if (event & OHCI1394_cycleTooLong) {
2226 if (printk_ratelimit())
2227 PRINT(KERN_WARNING, "isochronous cycle too long");
2228 else
2229 DBGMSG("OHCI1394_cycleTooLong");
2230 reg_write(ohci, OHCI1394_LinkControlSet,
2231 OHCI1394_LinkControl_CycleMaster);
2232 event &= ~OHCI1394_cycleTooLong;
2234 if (event & OHCI1394_cycleInconsistent) {
2235 /* We subscribe to the cycleInconsistent event only to
2236 * clear the corresponding event bit... otherwise,
2237 * isochronous cycleMatch DMA won't work. */
2238 DBGMSG("OHCI1394_cycleInconsistent");
2239 event &= ~OHCI1394_cycleInconsistent;
2241 if (event & OHCI1394_busReset) {
2242 /* The busReset event bit can't be cleared during the
2243 * selfID phase, so we disable busReset interrupts, to
2244 * avoid burying the cpu in interrupt requests. */
2245 spin_lock_irqsave(&ohci->event_lock, flags);
2246 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2248 if (ohci->check_busreset) {
2249 int loop_count = 0;
2251 udelay(10);
2253 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2254 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2256 spin_unlock_irqrestore(&ohci->event_lock, flags);
2257 udelay(10);
2258 spin_lock_irqsave(&ohci->event_lock, flags);
2260 /* The loop counter check is to prevent the driver
2261 * from remaining in this state forever. After the
2262 * initial bus reset, the loop would otherwise continue
2263 * forever and the system would hang until some device
2264 * was manually plugged into or out of a port! The forced
2265 * reset seems to solve this problem. This mainly affects nForce2. */
2266 if (loop_count > 10000) {
2267 ohci_devctl(host, RESET_BUS, LONG_RESET);
2268 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2269 loop_count = 0;
2272 loop_count++;
2275 spin_unlock_irqrestore(&ohci->event_lock, flags);
2276 if (!host->in_bus_reset) {
2277 DBGMSG("irq_handler: Bus reset requested");
2279 /* Subsystem call */
2280 hpsb_bus_reset(ohci->host);
2282 event &= ~OHCI1394_busReset;
2284 if (event & OHCI1394_reqTxComplete) {
2285 struct dma_trm_ctx *d = &ohci->at_req_context;
2286 DBGMSG("Got reqTxComplete interrupt "
2287 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2288 if (reg_read(ohci, d->ctrlSet) & 0x800)
2289 ohci1394_stop_context(ohci, d->ctrlClear,
2290 "reqTxComplete");
2291 else
2292 dma_trm_tasklet((unsigned long)d);
2293 //tasklet_schedule(&d->task);
2294 event &= ~OHCI1394_reqTxComplete;
2296 if (event & OHCI1394_respTxComplete) {
2297 struct dma_trm_ctx *d = &ohci->at_resp_context;
2298 DBGMSG("Got respTxComplete interrupt "
2299 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2300 if (reg_read(ohci, d->ctrlSet) & 0x800)
2301 ohci1394_stop_context(ohci, d->ctrlClear,
2302 "respTxComplete");
2303 else
2304 tasklet_schedule(&d->task);
2305 event &= ~OHCI1394_respTxComplete;
2307 if (event & OHCI1394_RQPkt) {
2308 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2309 DBGMSG("Got RQPkt interrupt status=0x%08X",
2310 reg_read(ohci, d->ctrlSet));
2311 if (reg_read(ohci, d->ctrlSet) & 0x800)
2312 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2313 else
2314 tasklet_schedule(&d->task);
2315 event &= ~OHCI1394_RQPkt;
2317 if (event & OHCI1394_RSPkt) {
2318 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2319 DBGMSG("Got RSPkt interrupt status=0x%08X",
2320 reg_read(ohci, d->ctrlSet));
2321 if (reg_read(ohci, d->ctrlSet) & 0x800)
2322 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2323 else
2324 tasklet_schedule(&d->task);
2325 event &= ~OHCI1394_RSPkt;
2327 if (event & OHCI1394_isochRx) {
2328 quadlet_t rx_event;
2330 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2331 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2332 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2333 event &= ~OHCI1394_isochRx;
2335 if (event & OHCI1394_isochTx) {
2336 quadlet_t tx_event;
2338 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2339 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2340 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2341 event &= ~OHCI1394_isochTx;
2343 if (event & OHCI1394_selfIDComplete) {
2344 if (host->in_bus_reset) {
2345 node_id = reg_read(ohci, OHCI1394_NodeID);
2347 if (!(node_id & 0x80000000)) {
2348 PRINT(KERN_ERR,
2349 "SelfID received, but NodeID invalid "
2350 "(probably new bus reset occurred): %08X",
2351 node_id);
2352 goto selfid_not_valid;
2355 phyid = node_id & 0x0000003f;
2356 isroot = (node_id & 0x40000000) != 0;
2358 DBGMSG("SelfID interrupt received "
2359 "(phyid %d, %s)", phyid,
2360 (isroot ? "root" : "not root"));
2362 handle_selfid(ohci, host, phyid, isroot);
2364 /* Clear the bus reset event and re-enable the
2365 * busReset interrupt. */
2366 spin_lock_irqsave(&ohci->event_lock, flags);
2367 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2368 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2369 spin_unlock_irqrestore(&ohci->event_lock, flags);
2371 /* Turn on phys dma reception.
2373 * TODO: Enable some sort of filtering management.
2374 */
2375 if (phys_dma) {
2376 reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
2377 0xffffffff);
2378 reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
2379 0xffffffff);
2382 DBGMSG("PhyReqFilter=%08x%08x",
2383 reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2384 reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
2386 hpsb_selfid_complete(host, phyid, isroot);
2387 } else
2388 PRINT(KERN_ERR,
2389 "SelfID received outside of bus reset sequence");
2391 selfid_not_valid:
2392 event &= ~OHCI1394_selfIDComplete;
2395 /* Make sure we handle everything, just in case we accidentally
2396 * enabled an interrupt that we didn't write a handler for. */
2397 if (event)
2398 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2399 event);
2401 return IRQ_HANDLED;
2404 /* Put the buffer back into the dma context */
2405 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2407 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2408 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2410 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2411 d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
2412 idx = (idx + d->num_desc - 1) % d->num_desc;
2413 d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
2415 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2416 * context program descriptors before it sees the wakeup bit set. */
2417 wmb();
2419 /* wake up the dma context if necessary */
2420 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2421 PRINT(KERN_INFO,
2422 "Waking dma ctx=%d ... processing is probably too slow",
2423 d->ctx);
2426 /* do this always, to avoid race condition */
2427 reg_write(ohci, d->ctrlSet, 0x1000);
2430 #define cond_le32_to_cpu(data, noswap) \
2431 ((noswap) ? (data) : le32_to_cpu(data))
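/*
 * Illustration: on big-endian hosts incoming quadlets normally need a
 * swap from the bus's little-endian layout, but controllers flagged
 * no_swap_incoming (see the Apple UniNorth handling in the probe code)
 * already deliver host order, e.g.:
 *
 *	tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
 */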
2433 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2434 -1, 0, -1, 0, -1, -1, 16, -1};
2436 /*
2437 * Determine the length of a packet in the buffer.
2438 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2439 */
2440 static inline int packet_length(struct dma_rcv_ctx *d, int idx,
2441 quadlet_t *buf_ptr, int offset,
2442 unsigned char tcode, int noswap)
2444 int length = -1;
2446 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2447 length = TCODE_SIZE[tcode];
2448 if (length == 0) {
2449 if (offset + 12 >= d->buf_size) {
2450 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2451 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2452 } else {
2453 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2455 length += 20;
2457 } else if (d->type == DMA_CTX_ISO) {
2458 /* Assumption: buffer fill mode with header/trailer */
2459 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2462 if (length > 0 && length % 4)
2463 length += 4 - (length % 4);
2465 return length;
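/*
 * The final rounding pads to a quadlet boundary: an assumed length of
 * 21 bytes becomes 21 + (4 - 21 % 4) = 24.
 */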
2468 /* Tasklet that processes dma receive buffers */
2469 static void dma_rcv_tasklet (unsigned long data)
2471 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2472 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2473 unsigned int split_left, idx, offset, rescount;
2474 unsigned char tcode;
2475 int length, bytes_left, ack;
2476 unsigned long flags;
2477 quadlet_t *buf_ptr;
2478 char *split_ptr;
2479 char msg[256];
2481 spin_lock_irqsave(&d->lock, flags);
2483 idx = d->buf_ind;
2484 offset = d->buf_offset;
2485 buf_ptr = d->buf_cpu[idx] + offset/4;
2487 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2488 bytes_left = d->buf_size - rescount - offset;
2490 while (bytes_left > 0) {
2491 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2493 /* packet_length() will return < 4 for an error */
2494 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2496 if (length < 4) { /* something is wrong */
2497 snprintf(msg, sizeof(msg), "Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2498 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2499 d->ctx, length);
2500 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2501 spin_unlock_irqrestore(&d->lock, flags);
2502 return;
2505 /* The first case is where we have a packet that crosses
2506 * over more than one descriptor. The next case is where
2507 * it's all in the first descriptor. */
2508 if ((offset + length) > d->buf_size) {
2509 DBGMSG("Split packet rcv'd");
2510 if (length > d->split_buf_size) {
2511 ohci1394_stop_context(ohci, d->ctrlClear,
2512 "Split packet size exceeded");
2513 d->buf_ind = idx;
2514 d->buf_offset = offset;
2515 spin_unlock_irqrestore(&d->lock, flags);
2516 return;
2519 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2520 == d->buf_size) {
2521 /* The other part of the packet has not been written yet;
2522 * this should never happen. In any case, we will pick it
2523 * up on the next call. */
2524 PRINT(KERN_INFO,
2525 "Got only half a packet!");
2526 d->buf_ind = idx;
2527 d->buf_offset = offset;
2528 spin_unlock_irqrestore(&d->lock, flags);
2529 return;
2532 split_left = length;
2533 split_ptr = (char *)d->spb;
2534 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2535 split_left -= d->buf_size-offset;
2536 split_ptr += d->buf_size-offset;
2537 insert_dma_buffer(d, idx);
2538 idx = (idx+1) % d->num_desc;
2539 buf_ptr = d->buf_cpu[idx];
2540 offset=0;
2542 while (split_left >= d->buf_size) {
2543 memcpy(split_ptr,buf_ptr,d->buf_size);
2544 split_ptr += d->buf_size;
2545 split_left -= d->buf_size;
2546 insert_dma_buffer(d, idx);
2547 idx = (idx+1) % d->num_desc;
2548 buf_ptr = d->buf_cpu[idx];
2551 if (split_left > 0) {
2552 memcpy(split_ptr, buf_ptr, split_left);
2553 offset = split_left;
2554 buf_ptr += offset/4;
2556 } else {
2557 DBGMSG("Single packet rcv'd");
2558 memcpy(d->spb, buf_ptr, length);
2559 offset += length;
2560 buf_ptr += length/4;
2561 if (offset==d->buf_size) {
2562 insert_dma_buffer(d, idx);
2563 idx = (idx+1) % d->num_desc;
2564 buf_ptr = d->buf_cpu[idx];
2565 offset=0;
2569 /* We get one phy packet to the async descriptor for each
2570 * bus reset. We always ignore it. */
2571 if (tcode != OHCI1394_TCODE_PHY) {
2572 if (!ohci->no_swap_incoming)
2573 header_le32_to_cpu(d->spb, tcode);
2574 DBGMSG("Packet received from node"
2575 " %d ack=0x%02X spd=%d tcode=0x%X"
2576 " length=%d ctx=%d tlabel=%d",
2577 (d->spb[1]>>16)&0x3f,
2578 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2579 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2580 tcode, length, d->ctx,
2581 (d->spb[0]>>10)&0x3f);
2583 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2584 == 0x11) ? 1 : 0;
2586 hpsb_packet_received(ohci->host, d->spb,
2587 length-4, ack);
2589 #ifdef OHCI1394_DEBUG
2590 else
2591 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2592 d->ctx);
2593 #endif
2595 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2597 bytes_left = d->buf_size - rescount - offset;
2601 d->buf_ind = idx;
2602 d->buf_offset = offset;
2604 spin_unlock_irqrestore(&d->lock, flags);
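/*
 * Buffer accounting used above, with assumed numbers: if buf_size is
 * 16384 and the descriptor's resCount (low 16 status bits) reads
 * 16000, the controller has written 384 bytes into this buffer, and
 * bytes_left = 16384 - 16000 - offset is the unread remainder past
 * the current parse offset.
 */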
2607 /* Bottom half that processes sent packets */
2608 static void dma_trm_tasklet (unsigned long data)
2610 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2611 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2612 struct hpsb_packet *packet, *ptmp;
2613 unsigned long flags;
2614 u32 status, ack;
2615 size_t datasize;
2617 spin_lock_irqsave(&d->lock, flags);
2619 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2620 datasize = packet->data_size;
2621 if (datasize && packet->type != hpsb_raw)
2622 status = le32_to_cpu(
2623 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2624 else
2625 status = le32_to_cpu(
2626 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2628 if (status == 0)
2629 /* this packet hasn't been sent yet */
2630 break;
2632 #ifdef OHCI1394_DEBUG
2633 if (datasize)
2634 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2635 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2636 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2637 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2638 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2639 status&0x1f, (status>>5)&0x3,
2640 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2641 d->ctx);
2642 else
2643 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2644 "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2645 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2646 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2647 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2648 status&0x1f, (status>>5)&0x3,
2649 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2650 d->ctx);
2651 else
2652 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2653 "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2654 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2655 >>16)&0x3f,
2656 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2657 >>4)&0xf,
2658 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2659 >>10)&0x3f,
2660 status&0x1f, (status>>5)&0x3,
2661 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2662 d->ctx);
2663 #endif
2665 if (status & 0x10) {
2666 ack = status & 0xf;
2667 } else {
2668 switch (status & 0x1f) {
2669 case EVT_NO_STATUS: /* that should never happen */
2670 case EVT_RESERVED_A: /* that should never happen */
2671 case EVT_LONG_PACKET: /* that should never happen */
2672 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2673 ack = ACKX_SEND_ERROR;
2674 break;
2675 case EVT_MISSING_ACK:
2676 ack = ACKX_TIMEOUT;
2677 break;
2678 case EVT_UNDERRUN:
2679 ack = ACKX_SEND_ERROR;
2680 break;
2681 case EVT_OVERRUN: /* that should never happen */
2682 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2683 ack = ACKX_SEND_ERROR;
2684 break;
2685 case EVT_DESCRIPTOR_READ:
2686 case EVT_DATA_READ:
2687 case EVT_DATA_WRITE:
2688 ack = ACKX_SEND_ERROR;
2689 break;
2690 case EVT_BUS_RESET: /* that should never happen */
2691 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2692 ack = ACKX_SEND_ERROR;
2693 break;
2694 case EVT_TIMEOUT:
2695 ack = ACKX_TIMEOUT;
2696 break;
2697 case EVT_TCODE_ERR:
2698 ack = ACKX_SEND_ERROR;
2699 break;
2700 case EVT_RESERVED_B: /* that should never happen */
2701 case EVT_RESERVED_C: /* that should never happen */
2702 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2703 ack = ACKX_SEND_ERROR;
2704 break;
2705 case EVT_UNKNOWN:
2706 case EVT_FLUSHED:
2707 ack = ACKX_SEND_ERROR;
2708 break;
2709 default:
2710 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2711 ack = ACKX_SEND_ERROR;
2712 BUG();
2716 list_del_init(&packet->driver_list);
2717 hpsb_packet_sent(ohci->host, packet, ack);
2719 if (datasize)
2720 pci_unmap_single(ohci->dev,
2721 le32_to_cpu(d->prg_cpu[d->sent_ind]->end.address),
2722 datasize, PCI_DMA_TODEVICE);
2724 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2725 d->free_prgs++;
2728 dma_trm_flush(ohci, d);
2730 spin_unlock_irqrestore(&d->lock, flags);
2733 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2735 int i;
2736 struct ti_ohci *ohci = d->ohci;
2738 if (ohci == NULL)
2739 return;
2741 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2743 if (d->buf_cpu) {
2744 for (i=0; i<d->num_desc; i++)
2745 if (d->buf_cpu[i] && d->buf_bus[i])
2746 pci_free_consistent(
2747 ohci->dev, d->buf_size,
2748 d->buf_cpu[i], d->buf_bus[i]);
2749 kfree(d->buf_cpu);
2750 kfree(d->buf_bus);
2752 if (d->prg_cpu) {
2753 for (i=0; i<d->num_desc; i++)
2754 if (d->prg_cpu[i] && d->prg_bus[i])
2755 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2756 d->prg_bus[i]);
2757 pci_pool_destroy(d->prg_pool);
2758 kfree(d->prg_cpu);
2759 kfree(d->prg_bus);
2761 kfree(d->spb);
2763 /* Mark this context as freed. */
2764 d->ohci = NULL;
2767 static int
2768 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2769 enum context_type type, int ctx, int num_desc,
2770 int buf_size, int split_buf_size, int context_base)
2772 int i;
2773 static int num_allocs;
2774 static char pool_name[20];
2776 d->ohci = ohci;
2777 d->type = type;
2778 d->ctx = ctx;
2780 d->num_desc = num_desc;
2781 d->buf_size = buf_size;
2782 d->split_buf_size = split_buf_size;
2784 d->ctrlSet = 0;
2785 d->ctrlClear = 0;
2786 d->cmdPtr = 0;
2788 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2789 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2791 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2792 PRINT(KERN_ERR, "Failed to allocate %s", "DMA buffer");
2793 free_dma_rcv_ctx(d);
2794 return -ENOMEM;
2797 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2798 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2800 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2801 PRINT(KERN_ERR, "Failed to allocate %s", "DMA prg");
2802 free_dma_rcv_ctx(d);
2803 return -ENOMEM;
2806 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2808 if (d->spb == NULL) {
2809 PRINT(KERN_ERR, "Failed to allocate %s", "split buffer");
2810 free_dma_rcv_ctx(d);
2811 return -ENOMEM;
2814 snprintf(pool_name, sizeof(pool_name), "ohci1394_rcv_prg%d", num_allocs);
2816 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2817 sizeof(struct dma_cmd), 4, 0);
2818 if (d->prg_pool == NULL) {
2820 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2821 free_dma_rcv_ctx(d);
2822 return -ENOMEM;
2824 num_allocs++;
2826 for (i=0; i<d->num_desc; i++) {
2827 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2828 d->buf_size,
2829 d->buf_bus+i);
2831 if (d->buf_cpu[i] != NULL) {
2832 memset(d->buf_cpu[i], 0, d->buf_size);
2833 } else {
2834 PRINT(KERN_ERR,
2835 "Failed to allocate %s", "DMA buffer");
2836 free_dma_rcv_ctx(d);
2837 return -ENOMEM;
2840 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2842 if (d->prg_cpu[i] != NULL) {
2843 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2844 } else {
2845 PRINT(KERN_ERR,
2846 "Failed to allocate %s", "DMA prg");
2847 free_dma_rcv_ctx(d);
2848 return -ENOMEM;
2852 spin_lock_init(&d->lock);
2854 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2855 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2856 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2858 tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d);
2859 return 0;
2862 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
2864 int i;
2865 struct ti_ohci *ohci = d->ohci;
2867 if (ohci == NULL)
2868 return;
2870 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
2872 if (d->prg_cpu) {
2873 for (i=0; i<d->num_desc; i++)
2874 if (d->prg_cpu[i] && d->prg_bus[i])
2875 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2876 d->prg_bus[i]);
2877 pci_pool_destroy(d->prg_pool);
2878 kfree(d->prg_cpu);
2879 kfree(d->prg_bus);
2882 /* Mark this context as freed. */
2883 d->ohci = NULL;
2886 static int
2887 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
2888 enum context_type type, int ctx, int num_desc,
2889 int context_base)
2891 int i;
2892 static char pool_name[20];
2893 static int num_allocs;
2895 d->ohci = ohci;
2896 d->type = type;
2897 d->ctx = ctx;
2898 d->num_desc = num_desc;
2899 d->ctrlSet = 0;
2900 d->ctrlClear = 0;
2901 d->cmdPtr = 0;
2903 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
2904 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
2906 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2907 PRINT(KERN_ERR, "Failed to allocate %s", "AT DMA prg");
2908 free_dma_trm_ctx(d);
2909 return -ENOMEM;
2912 snprintf(pool_name, sizeof(pool_name), "ohci1394_trm_prg%d", num_allocs);
2914 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2915 sizeof(struct at_dma_prg), 4, 0);
2916 if (d->prg_pool == NULL) {
2917 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2918 free_dma_trm_ctx(d);
2919 return -ENOMEM;
2921 num_allocs++;
2923 for (i = 0; i < d->num_desc; i++) {
2924 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2926 if (d->prg_cpu[i] != NULL) {
2927 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
2928 } else {
2929 PRINT(KERN_ERR,
2930 "Failed to allocate %s", "AT DMA prg");
2931 free_dma_trm_ctx(d);
2932 return -ENOMEM;
2936 spin_lock_init(&d->lock);
2938 /* initialize tasklet */
2939 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2940 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2941 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2942 tasklet_init(&d->task, dma_trm_tasklet, (unsigned long)d);
2943 return 0;
2946 static void ohci_set_hw_config_rom(struct hpsb_host *host, __be32 *config_rom)
2948 struct ti_ohci *ohci = host->hostdata;
2950 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
2951 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
2953 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
2957 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
2958 quadlet_t data, quadlet_t compare)
2960 struct ti_ohci *ohci = host->hostdata;
2961 int i;
2963 reg_write(ohci, OHCI1394_CSRData, data);
2964 reg_write(ohci, OHCI1394_CSRCompareData, compare);
2965 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
2967 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
2968 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
2969 break;
2971 mdelay(1);
2974 return reg_read(ohci, OHCI1394_CSRData);
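/*
 * This drives the controller's compare-and-swap unit for the serial
 * bus CSRs (csrSel 0..3: BUS_MANAGER_ID, BANDWIDTH_AVAILABLE and the
 * two CHANNELS_AVAILABLE halves). Hedged example (values assumed):
 * a caller claiming bus manager would issue
 *
 *	old = ohci_hw_csr_reg(host, 0, my_node_id, 0x3f);
 *
 * and has won the role iff the returned old value is still 0x3f.
 */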
2977 static struct hpsb_host_driver ohci1394_driver = {
2978 .owner = THIS_MODULE,
2979 .name = OHCI1394_DRIVER_NAME,
2980 .set_hw_config_rom = ohci_set_hw_config_rom,
2981 .transmit_packet = ohci_transmit,
2982 .devctl = ohci_devctl,
2983 .isoctl = ohci_isoctl,
2984 .hw_csr_reg = ohci_hw_csr_reg,
2987 /***********************************
2988 * PCI Driver Interface functions *
2989 ***********************************/
2991 #ifdef CONFIG_PPC_PMAC
2992 static void ohci1394_pmac_on(struct pci_dev *dev)
2994 if (machine_is(powermac)) {
2995 struct device_node *ofn = pci_device_to_OF_node(dev);
2997 if (ofn) {
2998 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
2999 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3004 static void ohci1394_pmac_off(struct pci_dev *dev)
3006 if (machine_is(powermac)) {
3007 struct device_node *ofn = pci_device_to_OF_node(dev);
3009 if (ofn) {
3010 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3011 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3015 #else
3016 #define ohci1394_pmac_on(dev)
3017 #define ohci1394_pmac_off(dev)
3018 #endif /* CONFIG_PPC_PMAC */
3020 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3021 const struct pci_device_id *ent)
3023 struct hpsb_host *host;
3024 struct ti_ohci *ohci; /* shortcut to currently handled device */
3025 resource_size_t ohci_base;
3026 int err = -ENOMEM;
3028 ohci1394_pmac_on(dev);
3029 if (pci_enable_device(dev)) {
3030 PRINT_G(KERN_ERR, "Failed to enable OHCI hardware");
3031 err = -ENXIO;
3032 goto err;
3034 pci_set_master(dev);
3036 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3037 if (!host) {
3038 PRINT_G(KERN_ERR, "Failed to allocate %s", "host structure");
3039 goto err;
3041 ohci = host->hostdata;
3042 ohci->dev = dev;
3043 ohci->host = host;
3044 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3045 host->pdev = dev;
3046 pci_set_drvdata(dev, ohci);
3048 /* We don't want hardware swapping */
3049 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3051 /* Some oddball Apple controllers do not order the selfid
3052 * properly, so we make up for it here. */
3053 #ifndef __LITTLE_ENDIAN
3054 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3055 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3056 ohci->no_swap_incoming = 1;
3057 ohci->selfid_swap = 0;
3058 } else
3059 ohci->selfid_swap = 1;
3060 #endif
3063 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3064 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3065 #endif
3067 /* These chipsets require a bit of extra care when checking after
3068 * a busreset. */
3069 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3070 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3071 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3072 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3073 ohci->check_busreset = 1;
3075 /* We hardwire the MMIO length, since some CardBus adaptors
3076 * fail to report the right length. Anyway, the OHCI spec
3077 * clearly says it's 2 KB, so this shouldn't be a problem. */
3078 ohci_base = pci_resource_start(dev, 0);
3079 if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3080 PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
3081 (unsigned long long)pci_resource_len(dev, 0));
3083 if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3084 OHCI1394_DRIVER_NAME)) {
3085 PRINT_G(KERN_ERR, "MMIO resource (0x%llx - 0x%llx) unavailable",
3086 (unsigned long long)ohci_base,
3087 (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3088 goto err;
3090 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3092 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3093 if (ohci->registers == NULL) {
3094 PRINT_G(KERN_ERR, "Failed to remap registers");
3095 err = -ENXIO;
3096 goto err;
3098 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3099 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3101 /* csr_config rom allocation */
3102 ohci->csr_config_rom_cpu =
3103 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3104 &ohci->csr_config_rom_bus);
3105 if (ohci->csr_config_rom_cpu == NULL) {
3106 PRINT_G(KERN_ERR, "Failed to allocate %s", "buffer config rom");
3107 goto err;
3109 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3111 /* self-id dma buffer allocation */
3112 ohci->selfid_buf_cpu =
3113 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3114 &ohci->selfid_buf_bus);
3115 if (ohci->selfid_buf_cpu == NULL) {
3116 PRINT_G(KERN_ERR, "Failed to allocate %s", "self-ID buffer");
3117 goto err;
3119 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3121 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3122 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3123 "an 8 KB boundary... may cause problems on some CXD3222 chips",
3124 ohci->selfid_buf_cpu);
3126 /* No self-id errors at startup */
3127 ohci->self_id_errors = 0;
3129 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3130 /* AR DMA request context allocation */
3131 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3132 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3133 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3134 OHCI1394_AsReqRcvContextBase) < 0) {
3135 PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Req context");
3136 goto err;
3138 /* AR DMA response context allocation */
3139 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3140 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3141 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3142 OHCI1394_AsRspRcvContextBase) < 0) {
3143 PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Resp context");
3144 goto err;
3146 /* AT DMA request context */
3147 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3148 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3149 OHCI1394_AsReqTrContextBase) < 0) {
3150 PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Req context");
3151 goto err;
3153 /* AT DMA response context */
3154 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3155 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3156 OHCI1394_AsRspTrContextBase) < 0) {
3157 PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Resp context");
3158 goto err;
3160 /* Start off with a soft reset, to clear everything to a sane
3161 * state. */
3162 ohci_soft_reset(ohci);
3164 /* Now enable LPS, which we need in order to start accessing
3165 * most of the registers. In fact, on some cards (ALI M5251),
3166 * accessing registers in the SClk domain without LPS enabled
3167 * will lock up the machine. */
3168 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3170 /* Disable and clear interrupts */
3171 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3172 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3174 /* Flush MMIO writes and wait to make sure we have full link enabled. */
3175 reg_read(ohci, OHCI1394_Version);
3176 msleep(50);
3178 /* Determine the number of available IR and IT contexts. */
3179 ohci->nb_iso_rcv_ctx =
3180 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3181 ohci->nb_iso_xmit_ctx =
3182 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3184 /* Set the usage bits for non-existent contexts so they can't
3185 * be allocated */
3186 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3187 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
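/* Example with an assumed 4 IR contexts: ir_ctx_usage = ~0 << 4
 * pre-marks bits 4..31 busy, so ohci1394_register_iso_tasklet() can
 * only ever hand out contexts 0-3. */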
3189 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3190 spin_lock_init(&ohci->iso_tasklet_list_lock);
3191 ohci->ISO_channel_usage = 0;
3192 spin_lock_init(&ohci->IR_channel_lock);
3194 spin_lock_init(&ohci->event_lock);
3196 /*
3197 * Interrupts are disabled at this point, all right, but due to
3198 * IRQF_SHARED we might get called anyway. We'll see no event, of
3199 * course, but we need to get to that "no event" cleanly, so enough
3200 * must already be initialized by this point.
3201 */
3202 err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3203 OHCI1394_DRIVER_NAME, ohci);
3204 if (err) {
3205 PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
3206 goto err;
3208 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3209 ohci_initialize(ohci);
3211 /* Set certain csr values */
3212 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3213 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3214 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3215 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3216 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3218 if (phys_dma) {
3219 host->low_addr_space =
3220 (u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
3221 if (!host->low_addr_space)
3222 host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
3224 host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3226 /* Tell the highlevel this host is ready */
3227 if (hpsb_add_host(host)) {
3228 PRINT_G(KERN_ERR, "Failed to register host with highlevel");
3229 goto err;
3231 ohci->init_state = OHCI_INIT_DONE;
3233 return 0;
3234 err:
3235 ohci1394_pci_remove(dev);
3236 return err;
3239 static void ohci1394_pci_remove(struct pci_dev *dev)
3241 struct ti_ohci *ohci;
3242 struct device *device;
3244 ohci = pci_get_drvdata(dev);
3245 if (!ohci)
3246 goto out;
3248 device = get_device(&ohci->host->device);
3250 switch (ohci->init_state) {
3251 case OHCI_INIT_DONE:
3252 hpsb_remove_host(ohci->host);
3254 /* Clear out BUS Options */
3255 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3256 reg_write(ohci, OHCI1394_BusOptions,
3257 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3258 0x00ff0000);
3259 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3261 case OHCI_INIT_HAVE_IRQ:
3262 /* Clear interrupt registers */
3263 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3264 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3265 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3266 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3267 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3268 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3270 /* Disable IRM Contender */
3271 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3273 /* Clear link control register */
3274 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3276 /* Let all other nodes know to ignore us */
3277 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3279 /* Soft reset before we start - this disables
3280 * interrupts and clears linkEnable and LPS. */
3281 ohci_soft_reset(ohci);
3282 free_irq(dev->irq, ohci);
3284 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3285 /* The ohci_soft_reset() stops all DMA contexts, so we
3286 * don't need to do that here. */
3287 free_dma_rcv_ctx(&ohci->ar_req_context);
3288 free_dma_rcv_ctx(&ohci->ar_resp_context);
3289 free_dma_trm_ctx(&ohci->at_req_context);
3290 free_dma_trm_ctx(&ohci->at_resp_context);
3292 case OHCI_INIT_HAVE_SELFID_BUFFER:
3293 pci_free_consistent(dev, OHCI1394_SI_DMA_BUF_SIZE,
3294 ohci->selfid_buf_cpu,
3295 ohci->selfid_buf_bus);
3297 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3298 pci_free_consistent(dev, OHCI_CONFIG_ROM_LEN,
3299 ohci->csr_config_rom_cpu,
3300 ohci->csr_config_rom_bus);
3302 case OHCI_INIT_HAVE_IOMAPPING:
3303 iounmap(ohci->registers);
3305 case OHCI_INIT_HAVE_MEM_REGION:
3306 release_mem_region(pci_resource_start(dev, 0),
3307 OHCI1394_REGISTER_SIZE);
3309 case OHCI_INIT_ALLOC_HOST:
3310 pci_set_drvdata(dev, NULL);
3313 if (device)
3314 put_device(device);
3315 out:
3316 ohci1394_pmac_off(dev);
3319 #ifdef CONFIG_PM
3320 static int ohci1394_pci_suspend(struct pci_dev *dev, pm_message_t state)
3322 int err;
3323 struct ti_ohci *ohci = pci_get_drvdata(dev);
3325 if (!ohci) {
3326 printk(KERN_ERR "%s: tried to suspend nonexistent host\n",
3327 OHCI1394_DRIVER_NAME);
3328 return -ENXIO;
3330 DBGMSG("suspend called");
3332 /* Clear the async DMA contexts and stop using the controller */
3333 hpsb_bus_reset(ohci->host);
3335 /* See ohci1394_pci_remove() for comments on this sequence */
3336 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3337 reg_write(ohci, OHCI1394_BusOptions,
3338 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3339 0x00ff0000);
3340 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3341 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3342 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3343 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3344 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3345 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3346 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3347 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3348 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3349 ohci_soft_reset(ohci);
3351 free_irq(dev->irq, ohci);
3352 err = pci_save_state(dev);
3353 if (err) {
3354 PRINT(KERN_ERR, "pci_save_state failed with %d", err);
3355 return err;
3357 err = pci_set_power_state(dev, pci_choose_state(dev, state));
3358 if (err)
3359 DBGMSG("pci_set_power_state failed with %d", err);
3360 ohci1394_pmac_off(dev);
3362 return 0;
3365 static int ohci1394_pci_resume(struct pci_dev *dev)
3367 int err;
3368 struct ti_ohci *ohci = pci_get_drvdata(dev);
3370 if (!ohci) {
3371 printk(KERN_ERR "%s: tried to resume nonexistent host\n",
3372 OHCI1394_DRIVER_NAME);
3373 return -ENXIO;
3375 DBGMSG("resume called");
3377 ohci1394_pmac_on(dev);
3378 pci_set_power_state(dev, PCI_D0);
3379 pci_restore_state(dev);
3380 err = pci_enable_device(dev);
3381 if (err) {
3382 PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
3383 return err;
3386 /* See ohci1394_pci_probe() for comments on this sequence */
3387 ohci_soft_reset(ohci);
3388 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3389 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3390 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3391 reg_read(ohci, OHCI1394_Version);
3392 msleep(50);
3394 err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3395 OHCI1394_DRIVER_NAME, ohci);
3396 if (err) {
3397 PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
3398 return err;
3401 ohci_initialize(ohci);
3403 hpsb_resume_host(ohci->host);
3404 return 0;
3406 #endif /* CONFIG_PM */
3408 static struct pci_device_id ohci1394_pci_tbl[] = {
3410 .class = PCI_CLASS_SERIAL_FIREWIRE_OHCI,
3411 .class_mask = PCI_ANY_ID,
3412 .vendor = PCI_ANY_ID,
3413 .device = PCI_ANY_ID,
3414 .subvendor = PCI_ANY_ID,
3415 .subdevice = PCI_ANY_ID,
3417 { 0, },
3420 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3422 static struct pci_driver ohci1394_pci_driver = {
3423 .name = OHCI1394_DRIVER_NAME,
3424 .id_table = ohci1394_pci_tbl,
3425 .probe = ohci1394_pci_probe,
3426 .remove = ohci1394_pci_remove,
3427 #ifdef CONFIG_PM
3428 .resume = ohci1394_pci_resume,
3429 .suspend = ohci1394_pci_suspend,
3430 #endif
3433 /***********************************
3434 * OHCI1394 Video Interface *
3435 ***********************************/
3437 /* Essentially the only purpose of this code is to allow another
3438 * module to hook into the OHCI interrupt handler. */
3440 /* returns zero if successful, one if DMA context is locked up */
3441 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3443 int i=0;
3445 /* stop the channel program if it's still running */
3446 reg_write(ohci, reg, 0x8000);
3448 /* Wait until it effectively stops */
3449 while (reg_read(ohci, reg) & 0x400) {
3450 i++;
3451 if (i>5000) {
3452 PRINT(KERN_ERR,
3453 "Runaway loop while stopping context: %s...", msg ? msg : "");
3454 return 1;
3457 mb();
3458 udelay(10);
3460 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3461 return 0;
3464 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3465 void (*func)(unsigned long), unsigned long data)
3467 tasklet_init(&tasklet->tasklet, func, data);
3468 tasklet->type = type;
3469 /* We init the tasklet->link field, so we can list_del() it
3470 * without worrying whether it was added to the list or not. */
3471 INIT_LIST_HEAD(&tasklet->link);
3474 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3475 struct ohci1394_iso_tasklet *tasklet)
3477 unsigned long flags, *usage;
3478 int n, i, r = -EBUSY;
3480 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3481 n = ohci->nb_iso_xmit_ctx;
3482 usage = &ohci->it_ctx_usage;
3484 else {
3485 n = ohci->nb_iso_rcv_ctx;
3486 usage = &ohci->ir_ctx_usage;
3488 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3489 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3490 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3491 return r;
3496 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3498 for (i = 0; i < n; i++)
3499 if (!test_and_set_bit(i, usage)) {
3500 tasklet->context = i;
3501 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3502 r = 0;
3503 break;
3506 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3508 return r;
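/*
 * Hedged sketch of the intended client usage (client names assumed,
 * not part of this file):
 *
 *	static void my_it_task(unsigned long data);
 *	static struct ohci1394_iso_tasklet t;
 *
 *	ohci1394_init_iso_tasklet(&t, OHCI_ISO_TRANSMIT,
 *				  my_it_task, (unsigned long)priv);
 *	if (ohci1394_register_iso_tasklet(ohci, &t) < 0)
 *		return -EBUSY;
 *	... t.context now holds the allocated IT context number ...
 */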
3511 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3512 struct ohci1394_iso_tasklet *tasklet)
3514 unsigned long flags;
3516 tasklet_kill(&tasklet->tasklet);
3518 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3520 if (tasklet->type == OHCI_ISO_TRANSMIT)
3521 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3522 else {
3523 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3525 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3526 clear_bit(0, &ohci->ir_multichannel_used);
3530 list_del(&tasklet->link);
3532 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3535 EXPORT_SYMBOL(ohci1394_stop_context);
3536 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3537 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3538 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3540 /***********************************
3541 * General module initialization *
3542 ***********************************/
3544 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3545 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3546 MODULE_LICENSE("GPL");
3548 static void __exit ohci1394_cleanup (void)
3550 pci_unregister_driver(&ohci1394_pci_driver);
3553 static int __init ohci1394_init(void)
3555 return pci_register_driver(&ohci1394_pci_driver);
3558 module_init(ohci1394_init);
3559 module_exit(ohci1394_cleanup);