/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                        Gord Peters <GordPeters@smarttech.com>
 *              2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . Iso Receive
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Iso Transmit
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 */

/*
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Andreas Tobler <toa@pop.agri.ch>
 *  . Updated proc_fs calls
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionality
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *  . Bug fixes
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Removed procfs support since it trashes random mem
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif

#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "dma.h"
#include "iso.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

#ifdef DBGMSG
#undef DBGMSG
#endif

#ifdef OHCI1394_DEBUG
#define DBGMSG(card, fmt, args...) \
printk(KERN_INFO "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)
#else
#define DBGMSG(card, fmt, args...)
#endif

#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information */
#define PRINT(level, card, fmt, args...) \
printk(level "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)

static char version[] __devinitdata =
	"$Rev: 986 $ Ben Collins <bcollins@debian.org>";
/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");

static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);
#ifndef __LITTLE_ENDIAN
static unsigned hdr_sizes[] =
{
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* ??? */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START (???) */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
};

/* Swap headers */
static inline void packet_swab(quadlet_t *data, int tcode)
{
	size_t size;

	/* Validate the tcode before indexing into hdr_sizes */
	if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
		return;

	size = hdr_sizes[tcode];

	while (size--)
		data[size] = swab32(data[size]);
}
#else
/* Don't waste cycles on same sex byte swaps */
#define packet_swab(w,x)
#endif /* !LITTLE_ENDIAN */
/***********************************
 * IEEE-1394 functionality section *
 ***********************************/

static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, ohci->id, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}

static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, ohci->id, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return;
}

/* Or's our value into the current value */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
	u8 old;

	old = get_phy_reg (ohci, addr);
	old |= data;
	set_phy_reg (ohci, addr, old);

	return;
}
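/*
 * Editor's note: an illustrative sketch (not part of the driver) of how
 * the magic constants above map onto the PhyControl register: the PHY
 * register address goes in at bit 8, bit 15 (0x00008000) requests a
 * read, bit 14 (0x00004000) requests a write and stays set while the
 * write is pending, bit 31 (0x80000000) signals read completion, and
 * the returned data sits in bits 16-23 (hence the 0x00ff0000 mask).
 */
static inline u32 phy_read_request(u8 addr)
{
	/* same encoding as get_phy_reg(): address at bit 8, rdReg set */
	return ((u32)addr << 8) | 0x00008000;
}

static inline int phy_read_done(u32 phy_control)
{
	/* bit 31 is set by the controller once the read data is valid */
	return (phy_control & 0x80000000) != 0;
}

static inline u8 phy_read_data(u32 phy_control)
{
	/* read data lives in bits 16-23 */
	return (phy_control & 0x00ff0000) >> 16;
}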
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR, ohci->id,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR, ohci->id,
			      "Too many errors on SelfID reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG (ohci->id, "SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG (ohci->id, "SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR, ohci->id,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG(ohci->id, "SelfID complete");

	return;
}
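/*
 * Editor's note: a small sketch (not driver code) of the self-ID
 * integrity rule used above.  Each self-ID packet arrives as two
 * quadlets, the second being the bitwise complement of the first, and
 * the sending node's phy_id sits in bits 24-29 of the first quadlet.
 */
static inline int selfid_quadlets_consistent(quadlet_t q0, quadlet_t q1)
{
	/* the check rejects packets corrupted in the self-ID buffer */
	return q0 == ~q1;
}

static inline int selfid_phy_id(quadlet_t q0)
{
	/* same extraction as the phyid comparison above */
	return (q0 & 0x3f000000) >> 24;
}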
static void ohci_soft_reset(struct ti_ohci *ohci) {
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		/* parenthesized: '!' binds tighter than '&' */
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}

	DBGMSG (ohci->id, "Soft reset finished");
}

static int run_context(struct ti_ohci *ohci, int reg, char *msg)
{
	u32 nodeId;

	/* check that the node id is valid */
	nodeId = reg_read(ohci, OHCI1394_NodeID);
	if (!(nodeId&0x80000000)) {
		PRINT(KERN_ERR, ohci->id,
		      "Running dma failed because Node ID is not valid");
		return -1;
	}

	/* check that the node number != 63 */
	if ((nodeId&0x3f)==63) {
		PRINT(KERN_ERR, ohci->id,
		      "Running dma failed because Node ID == 63");
		return -1;
	}

	/* Run the dma context */
	reg_write(ohci, reg, 0x8000);

	if (msg) PRINT(KERN_DEBUG, ohci->id, "%s", msg);

	return 0;
}
/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG(ohci->id, "Receive DMA ctx=%d initialized", d->ctx);
}
/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG(ohci->id, "Transmit DMA ctx=%d initialized", d->ctx);
}

/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	int i,ctx=0;
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG(ohci->id,"Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	for (i=0; i<32; i++) {
		if (tmp & 1) ctx++;
		tmp >>= 1;
	}
	return ctx;
}
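/*
 * Editor's note: illustrative only.  Writing all ones to the mask-set
 * register and reading it back yields a bitmask with one bit per
 * implemented context; get_nb_iso_ctx() simply counts the set bits.
 * E.g. a controller that implements four IR contexts reads back
 * 0x0000000f, so the loop above returns 4.
 */
static inline int count_context_bits(u32 implemented)
{
	int n = 0;
	while (implemented) {
		n += implemented & 1;
		implemented >>= 1;
	}
	return n;
}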
static void ohci_init_config_rom(struct ti_ohci *ohci);

/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
	quadlet_t buf;

	spin_lock_init(&ohci->phy_reg_lock);
	spin_lock_init(&ohci->event_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0xE0000000; /* Enable IRMC, CMC and ISC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets. */
	reg_write(ohci, OHCI1394_LinkControlSet, 0x00300000);
	set_phy_reg_mask(ohci, 4, 0xc0);

	/* Clear interrupt registers */
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id dma */
	reg_write(ohci, OHCI1394_LinkControlSet, 0x00000200);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Initialize the Config ROM */
	ohci_init_config_rom(ohci);

	/* Now get our max packet size */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/*
	 * Accept AT requests from all nodes. This probably
	 * will have to be controlled from the subsystem
	 * on a per node basis.
	 */
	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
	PRINT(KERN_INFO, ohci->id, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
	      "MMIO=[%lx-%lx]  Max Packet=[%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size);
}
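/*
 * Editor's note: a sketch of the Version register decode used in the
 * PRINT above, factored out for readability (the driver computes it
 * inline).  The version and revision fields are BCD encoded; with
 * buf = 0x00010001, the expressions below yield 1 and 10, so the
 * banner reads "OHCI-1394 1.10".
 */
static inline unsigned int ohci_version_major(quadlet_t buf)
{
	return ((buf >> 16) & 0xf) + ((buf >> 20) & 0xf) * 10;
}

static inline unsigned int ohci_version_minor(quadlet_t buf)
{
	return ((buf >> 4) & 0xf) + (buf & 0xf) * 10;
}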
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG(ohci->id, "Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG(ohci->id, "cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = packet->header[0];
			d->prg_cpu[idx]->data[2] = packet->header[1];
		} else {
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 */
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR, ohci->id,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}

			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG(ohci->id, "Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind+1)%d->num_desc;
}
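/*
 * Editor's note: sketch (not driver code) of the ~1 second response
 * timeout computed in insert_packet() above.  IsochronousCycleTimer
 * keeps seconds starting at bit 25 and the 13-bit cycle count (0..7999)
 * at bits 12-24; the timeStamp written into the descriptor status holds
 * 3 seconds bits at bits 13-15 and the cycle at bits 0-12, so the
 * deadline is "the same cycle number, one second from now".
 */
static inline u32 at_resp_timestamp(u32 cycle_timer)
{
	u32 seconds = (cycle_timer >> 25) & 0x7;	/* low 3 seconds bits */
	u32 cycles  = (cycle_timer & 0x01fff000) >> 12;	/* 13-bit cycle count */

	/* deadline = same cycle, seconds + 1 (mod 8) */
	return (((seconds + 1) & 0x7) << 13) | cycles;
}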
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static int dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *p;
	int idx,z;

	if (list_empty(&d->pending_list) || d->free_prgs == 0)
		return 0;

	p = driver_packet(d->pending_list.next);
	idx = d->prg_ind;
	z = (p->data_size) ? 3 : 2;

	/* insert the packets into the dma fifo */
	while (d->free_prgs > 0 && !list_empty(&d->pending_list)) {
		struct hpsb_packet *p = driver_packet(d->pending_list.next);
		list_del(&p->driver_list);
		insert_packet(ohci, d, p);
	}

	if (d->free_prgs == 0)
		DBGMSG(ohci->id, "Transmit DMA FIFO ctx=%d is full... waiting", d->ctx);

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		DBGMSG(ohci->id,"Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx]|z);
		run_context(ohci, d->ctrlSet, NULL);
	}
	else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
			DBGMSG(ohci->id,"Waking transmit DMA ctx=%d",d->ctx);
		}

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}
	return 1;
}
/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR, ohci->id,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return 0;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use. However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR, ohci->id,
				      "legacy IT context cannot be initialized during interrupt");
				return 0;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR, ohci->id,
				      "error initializing legacy IT context");
				return 0;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 1;
}
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, ohci->id, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG(ohci->id, "Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  0x00300000);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear, 0x00700000);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG(ohci->id, "Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	case ISO_LISTEN_CHANNEL:
	{
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR, ohci->id,
			      "%s: ISO listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		/* activate the legacy IR context */
		if (ohci->ir_legacy_context.ohci == NULL) {
			if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
					      DMA_CTX_ISO, 0, IR_NUM_DESC,
					      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
					      OHCI1394_IsoRcvContextBase) < 0) {
				PRINT(KERN_ERR, ohci->id, "%s: failed to allocate an IR context",
				      __FUNCTION__);
				return -ENOMEM;
			}
			ohci->ir_legacy_channels = 0;
			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);

			DBGMSG(ohci->id, "ISO receive legacy context activated");
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR, ohci->id,
			      "%s: ISO listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG(ohci->id, "Listening enabled on channel %d", arg);
		break;
	}
	case ISO_UNLISTEN_CHANNEL:
	{
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR, ohci->id,
			      "%s: ISO unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR, ohci->id,
			      "%s: ISO unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG(ohci->id, "Listening disabled on channel %d", arg);

		if (ohci->ir_legacy_channels == 0) {
			free_dma_rcv_ctx(&ohci->ir_legacy_context);
			DBGMSG(ohci->id, "ISO receive legacy context deactivated");
		}
		break;
	}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}
/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor). Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block. But
  the user API expects all packets to be contiguous. Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/
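/*
 * Editor's note: an illustrative sketch of the guard-page copy
 * described above (the real logic lives in
 * ohci_iso_recv_bufferfill_parse()).  If a packet begins at 'offset'
 * and runs past the end of the ring of ring_len = nblocks * stride
 * bytes, the tail that wrapped to the start of the buffer is mirrored
 * just past the ring, into the reserved guard page, so the caller sees
 * one contiguous packet.
 */
static inline void copy_wrapped_tail(unsigned char *buf, unsigned int ring_len,
				     unsigned int offset, unsigned int len)
{
	if (offset + len > ring_len) {
		unsigned int tail_len = offset + len - ring_len;
		/* the tail currently sits at buf[0..tail_len) */
		memcpy(buf + ring_len, buf, tail_len);
	}
}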
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE,
	       PACKET_PER_BUFFER_MODE } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};

static void ohci_iso_recv_bufferfill_task(unsigned long data);
static void ohci_iso_recv_packetperbuf_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (iso->irq_interval == 1 && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG(ohci->id, "ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		/* (err, sort of... 1 is always the safest value) */
		recv->block_irq_interval = iso->irq_interval / recv->nblocks;
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		     recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG(ohci->id, "ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				  sizeof(struct dma_cmd) * recv->nblocks,
				  recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
				                       OHCI_ISO_RECEIVE,
				  recv->dma_mode == BUFFER_FILL_MODE ?
				  ohci_iso_recv_bufferfill_task :
				  ohci_iso_recv_packetperbuf_task,
				  (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
		goto err;

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG(ohci->id, "ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
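/*
 * Editor's note: sketch of the packet-per-buffer stride choice made in
 * ohci_iso_recv_init() above.  The stride is the smallest power of two
 * that is >= 8 and large enough to hold one packet (payload plus the
 * 8-byte ISO header), and must not exceed PAGE_SIZE so a descriptor
 * never crosses a page.  E.g. with a 64 KB buffer and 64 packets,
 * max_packet_size = 1024 and the loop settles on a stride of 1024.
 */
static inline unsigned int choose_buf_stride(unsigned int max_packet_size)
{
	unsigned int stride;

	/* double from 8 until the stride can hold one packet */
	for (stride = 8; stride < max_packet_size; stride *= 2)
		;
	return stride;
}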
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}
/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
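/*
 * Editor's note: illustrative helper (not used by the driver) showing
 * the branchAddress encoding seen throughout these DMA programs.  The
 * low four bits of a 16-byte-aligned descriptor address are free, so
 * OHCI stores 'Z' there: the number of descriptors in the block being
 * branched to (1 for the single-descriptor blocks above, 0 to
 * terminate the chain).
 */
static inline u32 branch_with_z(u32 descriptor_bus_addr, unsigned int z)
{
	return (descriptor_bus_addr & 0xfffffff0) | (z & 0xf);
}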
/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, recv->ohci->id,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
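/*
 * Editor's note: sketch of the contextMatch cycle encoding built
 * above.  The match field holds a 15-bit cycle number: the 13-bit
 * cycle count plus two 'seconds' bits borrowed from the current cycle
 * timer, advanced by one second so the DMA has time to start before
 * the match can fire.
 */
static inline u32 encode_cycle_match(u32 cycle_timer, int cycle)
{
	u32 seconds = (cycle_timer >> 25) + 1;	/* one second of margin */
	u32 match = (cycle & 0x1FFF) | ((seconds & 3) << 13);

	return match << 12;	/* cycle match starts at bit 12 of contextMatch */
}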
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}
static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	int len;

	/* release the memory where the packet was */
	len = info->len;

	/* add the wasted space for padding to 4 bytes */
	if (len % 4)
		len += 4 - (len % 4);

	/* add 8 bytes for the OHCI DMA data format overhead */
	len += 8;

	recv->released_bytes += len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}
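/*
 * Editor's note: worked example (not driver code) of the accounting
 * above.  Each released packet costs its length rounded up to a
 * quadlet, plus 8 bytes of OHCI header/trailer.  With PAGE_SIZE
 * (4096-byte) blocks, a reader releasing 1000-byte packets hands one
 * DMA block back to the ring roughly every four packets:
 * 4 * (1000 + 8) = 4032, and the fifth release pushes released_bytes
 * past 4096.
 */
static inline int bytes_charged_per_packet(int len)
{
	/* pad the payload to a quadlet boundary */
	if (len % 4)
		len += 4 - (len % 4);

	/* add the 8-byte OHCI data format overhead */
	return len + 8;
}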
static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}
/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR, recv->ohci->id,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR, recv->ohci->id,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0  && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
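/*
 * Editor's note: a sketch of the buffer-fill header decode above, per
 * the packet layout referenced from the OHCI spec (section 10.6.1.1).
 * Viewed as little-endian bytes p[0..3], the leading quadlet carries
 * sy (low nibble of p[0]), tag and channel (p[1]), and the 16-bit data
 * length (p[2] | p[3] << 8); the payload follows, then a trailer
 * quadlet whose low 13 bits are the receive cycle.
 */
struct iso_hdr_fields {
	unsigned short len;
	unsigned char channel, tag, sy;
};

static inline struct iso_hdr_fields decode_iso_hdr(const unsigned char *p)
{
	struct iso_hdr_fields f;

	f.len = p[2] | (p[3] << 8);
	f.channel = p[1] & 0x3F;
	f.tag = p[1] >> 6;
	f.sy = p[0] & 0xF;
	return f;
}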
static void ohci_iso_recv_bufferfill_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	int loop;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */

		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus
		   (descriptor fields are little-endian) */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR, recv->ohci->id,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = cpu_to_le32(recv->buf_stride);

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG(recv->ohci->id, "ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}
static void ohci_iso_recv_packetperbuf_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;
	int count;
	int wake = 0;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, recv->ohci->id, "IR DMA error - packet too long for buffer\n");
		} else if (event) {
			PRINT(KERN_ERR, recv->ohci->id, "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		/* sync our view of the buffer */
		dma_region_sync(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;

			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = cpu_to_le32(recv->buf_stride);

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}
/***********************************
 * rawiso ISO transmission         *
 ***********************************/

struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;
	struct ohci1394_iso_tasklet task;
	int task_active;

	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};

/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];
	u32 unused[2];
	struct dma_cmd output_last;
};

static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
		goto err;

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR, xmit->ohci->id,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}

static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8  event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR, xmit->ohci->id,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
1956 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1958 struct ohci_iso_xmit *xmit = iso->hostdata;
1960 int next_i, prev_i;
1961 struct iso_xmit_cmd *next, *prev;
1963 unsigned int offset;
1964 unsigned short len;
1965 unsigned char tag, sy;
1967 /* check that the packet doesn't cross a page boundary
1968 (we could allow this if we added OUTPUT_MORE descriptor support) */
1969 if (cross_bound(info->offset, info->len)) {
1970 PRINT(KERN_ERR, xmit->ohci->id,
1971 "rawiso xmit: packet %u crosses a page boundary",
1972 iso->first_packet);
1973 return -EINVAL;
1976 offset = info->offset;
1977 len = info->len;
1978 tag = info->tag;
1979 sy = info->sy;
1981 /* sync up the card's view of the buffer */
1982 dma_region_sync(&iso->data_buf, offset, len);
1984 /* append first_packet to the DMA chain by linking the
1985 previous descriptor to it (next will become the new
1986 end of the DMA chain) */
1988 next_i = iso->first_packet;
1989 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
1991 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
1992 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
1994 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
1995 memset(next, 0, sizeof(struct iso_xmit_cmd));
1996 next->output_more_immediate.control = cpu_to_le32(0x02000008);
1998 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2000 /* tcode = 0xA, and sy */
2001 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2003 /* tag and channel number */
2004 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2006 /* transmission speed */
2007 next->iso_hdr[2] = iso->speed & 0x7;
2009 /* payload size */
2010 next->iso_hdr[6] = len & 0xFF;
2011 next->iso_hdr[7] = len >> 8;
2013 /* set up the OUTPUT_LAST */
2014 next->output_last.control = cpu_to_le32(1 << 28);
2015 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2016 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2017 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2018 next->output_last.control |= cpu_to_le32(len);
2020 /* payload bus address */
2021 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2023 /* leave branchAddress at zero for now */
2025 /* re-write the previous DMA descriptor to chain to this one */
2027 /* set prev branch address to point to next (Z=3) */
2028 prev->output_last.branchAddress = cpu_to_le32(
2029 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
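/* The low nibble of a branchAddress is the Z value: the number of
 * 16-byte descriptor slots at the branch target. Z = 3 because each
 * iso_xmit_cmd is an OUTPUT_MORE_IMMEDIATE (two slots, embedded
 * header included) followed by an OUTPUT_LAST (one slot). */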
2031 /* disable interrupt, unless required by the IRQ interval */
2032 if (prev_i % iso->irq_interval) {
2033 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2034 } else {
2035 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
2038 wmb();
2040 /* wake DMA in case it is sleeping */
2041 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2043 /* issue a dummy read of the cycle timer to force all PCI
2044 writes to be posted immediately */
2045 mb();
2046 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
2048 return 0;
2051 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2053 struct ohci_iso_xmit *xmit = iso->hostdata;
2055 /* clear out the control register */
2056 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2057 wmb();
2059 /* address and length of first descriptor block (Z=3) */
2060 reg_write(xmit->ohci, xmit->CommandPtr,
2061 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2063 /* cycle match */
2064 if (cycle != -1) {
2065 u32 start = cycle & 0x1FFF;
2067 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2068 just snarf them from the current time */
2069 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2071 /* advance one second to give some extra time for DMA to start */
2072 seconds += 1;
2074 start |= (seconds & 3) << 13;
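/* Example: if cycle == 7900 and the timer is currently in second S,
 * start == (((S + 1) & 3) << 13) | 7900, and the context begins
 * transmitting when the cycle timer next reaches that 15-bit value
 * (cycleMatchEnable is bit 31, the match value sits in bits 30-16). */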
2076 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2079 /* enable interrupts */
2080 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2082 /* run */
2083 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2084 mb();
2086 /* wait 100 usec to give the card time to go active */
2087 udelay(100);
2089 /* check the RUN bit */
2090 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2091 PRINT(KERN_ERR, xmit->ohci->id, "Error starting IT DMA (ContextControl 0x%08x)",
2092 reg_read(xmit->ohci, xmit->ContextControlSet));
2093 return -1;
2096 return 0;
2099 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2102 switch(cmd) {
2103 case XMIT_INIT:
2104 return ohci_iso_xmit_init(iso);
2105 case XMIT_START:
2106 return ohci_iso_xmit_start(iso, arg);
2107 case XMIT_STOP:
2108 ohci_iso_xmit_stop(iso);
2109 return 0;
2110 case XMIT_QUEUE:
2111 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2112 case XMIT_SHUTDOWN:
2113 ohci_iso_xmit_shutdown(iso);
2114 return 0;
2116 case RECV_INIT:
2117 return ohci_iso_recv_init(iso);
2118 case RECV_START: {
2119 int *args = (int*) arg;
2120 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2122 case RECV_STOP:
2123 ohci_iso_recv_stop(iso);
2124 return 0;
2125 case RECV_RELEASE:
2126 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2127 return 0;
2128 case RECV_SHUTDOWN:
2129 ohci_iso_recv_shutdown(iso);
2130 return 0;
2131 case RECV_LISTEN_CHANNEL:
2132 ohci_iso_recv_change_channel(iso, arg, 1);
2133 return 0;
2134 case RECV_UNLISTEN_CHANNEL:
2135 ohci_iso_recv_change_channel(iso, arg, 0);
2136 return 0;
2137 case RECV_SET_CHANNEL_MASK:
2138 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2139 return 0;
2141 default:
2142 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2143 cmd);
2144 break;
2146 return -EINVAL;
2149 /***************************************
2150 * IEEE-1394 functionality section END *
2151 ***************************************/
2154 /********************************************************
2155 * Global stuff (interrupt handler, init/shutdown code) *
2156 ********************************************************/
2158 static void dma_trm_reset(struct dma_trm_ctx *d)
2160 unsigned long flags;
2161 LIST_HEAD(packet_list);
2163 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2165 /* Lock the context, reset it and release it. Move the packets
2166 * that were pending in the context to packet_list and free
2167 * them after releasing the lock. */
2169 spin_lock_irqsave(&d->lock, flags);
2171 list_splice(&d->fifo_list, &packet_list);
2172 list_splice(&d->pending_list, &packet_list);
2173 INIT_LIST_HEAD(&d->fifo_list);
2174 INIT_LIST_HEAD(&d->pending_list);
2176 d->branchAddrPtr = NULL;
2177 d->sent_ind = d->prg_ind;
2178 d->free_prgs = d->num_desc;
2180 spin_unlock_irqrestore(&d->lock, flags);
2182 /* Now process subsystem callbacks for the packets from the
2183 * context. */
2185 while (!list_empty(&packet_list)) {
2186 struct hpsb_packet *p = driver_packet(packet_list.next);
2187 PRINT(KERN_INFO, d->ohci->id,
2188 "AT dma reset ctx=%d, aborting transmission", d->ctx);
2189 list_del(&p->driver_list);
2190 hpsb_packet_sent(d->ohci->host, p, ACKX_ABORTED);
2194 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2195 quadlet_t rx_event,
2196 quadlet_t tx_event)
2198 struct list_head *lh;
2199 struct ohci1394_iso_tasklet *t;
2200 unsigned long mask;
2202 spin_lock(&ohci->iso_tasklet_list_lock);
2204 list_for_each(lh, &ohci->iso_tasklet_list) {
2205 t = list_entry(lh, struct ohci1394_iso_tasklet, link);
2206 mask = 1 << t->context;
2208 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2209 tasklet_schedule(&t->tasklet);
2210 else if (rx_event & mask)
2211 tasklet_schedule(&t->tasklet);
2214 spin_unlock(&ohci->iso_tasklet_list_lock);
2218 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2219 struct pt_regs *regs_are_unused)
2221 quadlet_t event, node_id;
2222 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2223 struct hpsb_host *host = ohci->host;
2224 int phyid = -1, isroot = 0;
2225 unsigned long flags;
2227 /* Read and clear the interrupt event register. Don't clear
2228 * the busReset event, though. This is done when we get the
2229 * selfIDComplete interrupt. */
2230 spin_lock_irqsave(&ohci->event_lock, flags);
2231 event = reg_read(ohci, OHCI1394_IntEventClear);
2232 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2233 spin_unlock_irqrestore(&ohci->event_lock, flags);
2235 if (!event)
2236 return IRQ_NONE;
2238 DBGMSG(ohci->id, "IntEvent: %08x", event);
2240 if (event & OHCI1394_unrecoverableError) {
2241 int ctx;
2242 PRINT(KERN_ERR, ohci->id, "Unrecoverable error!");
2244 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2245 PRINT(KERN_ERR, ohci->id, "Async Req Tx Context died: "
2246 "ctrl[%08x] cmdptr[%08x]",
2247 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2248 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2250 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2251 PRINT(KERN_ERR, ohci->id, "Async Rsp Tx Context died: "
2252 "ctrl[%08x] cmdptr[%08x]",
2253 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2254 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2256 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2257 PRINT(KERN_ERR, ohci->id, "Async Req Rcv Context died: "
2258 "ctrl[%08x] cmdptr[%08x]",
2259 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2260 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2262 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2263 PRINT(KERN_ERR, ohci->id, "Async Rsp Rcv Context died: "
2264 "ctrl[%08x] cmdptr[%08x]",
2265 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2266 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2268 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2269 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2270 PRINT(KERN_ERR, ohci->id, "Iso Xmit %d Context died: "
2271 "ctrl[%08x] cmdptr[%08x]", ctx,
2272 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2273 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2276 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2277 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2278 PRINT(KERN_ERR, ohci->id, "Iso Recv %d Context died: "
2279 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2280 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2281 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2282 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2285 event &= ~OHCI1394_unrecoverableError;
2288 if (event & OHCI1394_cycleInconsistent) {
2289 /* We subscribe to the cycleInconsistent event only to
2290 * clear the corresponding event bit... otherwise,
2291 * isochronous cycleMatch DMA won't work. */
2292 DBGMSG(ohci->id, "OHCI1394_cycleInconsistent");
2293 event &= ~OHCI1394_cycleInconsistent;
2296 if (event & OHCI1394_busReset) {
2297 /* The busReset event bit can't be cleared during the
2298 * selfID phase, so we disable busReset interrupts, to
2299 * avoid burying the cpu in interrupt requests. */
2300 spin_lock_irqsave(&ohci->event_lock, flags);
2301 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2303 if (ohci->check_busreset) {
2304 int loop_count = 0;
2306 udelay(10);
2308 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2309 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2311 spin_unlock_irqrestore(&ohci->event_lock, flags);
2312 udelay(10);
2313 spin_lock_irqsave(&ohci->event_lock, flags);
2315 /* The loop counter check is to prevent the driver
2316 * from remaining in this state forever. On the initial
2317 * bus reset the loop would otherwise run forever and
2318 * hang the system, until some device is manually plugged
2319 * into or out of a port! The forced reset seems to
2320 * solve this problem. This mainly affects nForce2. */
2321 if (loop_count > 10000) {
2322 ohci_devctl(host, RESET_BUS, LONG_RESET);
2323 DBGMSG(ohci->id, "Detected bus-reset loop. Forced a bus reset!");
2324 loop_count = 0;
2327 loop_count++;
2330 spin_unlock_irqrestore(&ohci->event_lock, flags);
2331 if (!host->in_bus_reset) {
2332 DBGMSG(ohci->id, "irq_handler: Bus reset requested");
2334 /* Subsystem call */
2335 hpsb_bus_reset(ohci->host);
2337 event &= ~OHCI1394_busReset;
2340 /* XXX: We need a way to also queue the OHCI1394_reqTxComplete,
2341 * but for right now we simply run it upon reception, to make sure
2342 * we get sent acks before response packets. This sucks mainly
2343 * because it halts the interrupt handler. */
2344 if (event & OHCI1394_reqTxComplete) {
2345 struct dma_trm_ctx *d = &ohci->at_req_context;
2346 DBGMSG(ohci->id, "Got reqTxComplete interrupt "
2347 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2348 if (reg_read(ohci, d->ctrlSet) & 0x800)
2349 ohci1394_stop_context(ohci, d->ctrlClear,
2350 "reqTxComplete");
2351 else
2352 dma_trm_tasklet ((unsigned long)d);
2353 event &= ~OHCI1394_reqTxComplete;
2355 if (event & OHCI1394_respTxComplete) {
2356 struct dma_trm_ctx *d = &ohci->at_resp_context;
2357 DBGMSG(ohci->id, "Got respTxComplete interrupt "
2358 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2359 if (reg_read(ohci, d->ctrlSet) & 0x800)
2360 ohci1394_stop_context(ohci, d->ctrlClear,
2361 "respTxComplete");
2362 else
2363 tasklet_schedule(&d->task);
2364 event &= ~OHCI1394_respTxComplete;
2366 if (event & OHCI1394_RQPkt) {
2367 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2368 DBGMSG(ohci->id, "Got RQPkt interrupt status=0x%08X",
2369 reg_read(ohci, d->ctrlSet));
2370 if (reg_read(ohci, d->ctrlSet) & 0x800)
2371 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2372 else
2373 tasklet_schedule(&d->task);
2374 event &= ~OHCI1394_RQPkt;
2376 if (event & OHCI1394_RSPkt) {
2377 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2378 DBGMSG(ohci->id, "Got RSPkt interrupt status=0x%08X",
2379 reg_read(ohci, d->ctrlSet));
2380 if (reg_read(ohci, d->ctrlSet) & 0x800)
2381 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2382 else
2383 tasklet_schedule(&d->task);
2384 event &= ~OHCI1394_RSPkt;
2386 if (event & OHCI1394_isochRx) {
2387 quadlet_t rx_event;
2389 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2390 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2391 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2392 event &= ~OHCI1394_isochRx;
2394 if (event & OHCI1394_isochTx) {
2395 quadlet_t tx_event;
2397 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2398 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2399 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2400 event &= ~OHCI1394_isochTx;
2402 if (event & OHCI1394_selfIDComplete) {
2403 if (host->in_bus_reset) {
2404 node_id = reg_read(ohci, OHCI1394_NodeID);
2406 if (!(node_id & 0x80000000)) {
2407 PRINT(KERN_ERR, ohci->id,
2408 "SelfID received, but NodeID invalid "
2409 "(probably new bus reset occurred): %08X",
2410 node_id);
2411 goto selfid_not_valid;
2414 phyid = node_id & 0x0000003f;
2415 isroot = (node_id & 0x40000000) != 0;
2417 DBGMSG(ohci->id,
2418 "SelfID interrupt received "
2419 "(phyid %d, %s)", phyid,
2420 (isroot ? "root" : "not root"));
2422 handle_selfid(ohci, host, phyid, isroot);
2424 /* Clear the bus reset event and re-enable the
2425 * busReset interrupt. */
2426 spin_lock_irqsave(&ohci->event_lock, flags);
2427 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2428 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2429 spin_unlock_irqrestore(&ohci->event_lock, flags);
2431 /* Accept Physical requests from all nodes. */
2432 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
2433 reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
2435 /* Turn on phys dma reception.
2437 * TODO: Enable some sort of filtering management.
2439 if (phys_dma) {
2440 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
2441 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
2442 reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
2443 } else {
2444 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
2445 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
2448 DBGMSG(ohci->id, "PhyReqFilter=%08x%08x",
2449 reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
2450 reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
2452 hpsb_selfid_complete(host, phyid, isroot);
2453 } else
2454 PRINT(KERN_ERR, ohci->id,
2455 "SelfID received outside of bus reset sequence");
2457 selfid_not_valid:
2458 event &= ~OHCI1394_selfIDComplete;
2461 /* Make sure we handle everything, just in case we accidentally
2462 * enabled an interrupt that we didn't write a handler for. */
2463 if (event)
2464 PRINT(KERN_ERR, ohci->id, "Unhandled interrupt(s) 0x%08x",
2465 event);
2467 return IRQ_HANDLED;
2470 /* Put the buffer back into the dma context */
2471 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2473 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2474 DBGMSG(ohci->id, "Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2476 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2477 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2477 d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
2478 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2479 d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
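/* Storing 1 in the low nibble gives the previous descriptor a
 * branch with Z = 1, i.e. one input descriptor slot at the target,
 * which re-links the recycled buffer into the receive ring. */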
2481 /* wake up the dma context if necessary */
2482 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2483 PRINT(KERN_INFO, ohci->id,
2484 "Waking dma ctx=%d ... processing is probably too slow",
2485 d->ctx);
2488 /* do this always, to avoid race condition */
2489 reg_write(ohci, d->ctrlSet, 0x1000);
2492 #define cond_le32_to_cpu(data, noswap) \
2493 ((noswap) ? (data) : le32_to_cpu(data))
2495 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2496 -1, 0, -1, 0, -1, -1, 16, -1};
2499 * Determine the length of a packet in the buffer
2500 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2502 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2503 int offset, unsigned char tcode, int noswap)
2505 int length = -1;
2507 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2508 length = TCODE_SIZE[tcode];
2509 if (length == 0) {
2510 if (offset + 12 >= d->buf_size) {
2511 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2512 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2513 } else {
2514 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2516 length += 20;
2518 } else if (d->type == DMA_CTX_ISO) {
2519 /* Assumption: buffer fill mode with header/trailer */
2520 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2523 if (length > 0 && length % 4)
2524 length += 4 - (length % 4);
2526 return length;
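/* The controller stores each async packet as the 1394 header (12 or
 * 16 bytes, quadlet payloads included), any block payload, plus a
 * 4-byte xferStatus/timeStamp trailer - hence the TCODE_SIZE entries
 * of 16 and 20, and the +20 for block tcodes, whose payload size
 * sits in the upper half of the fourth header quadlet. Iso packets
 * in buffer-fill mode carry a 4-byte header quadlet (dataLength in
 * the upper 16 bits) plus the same trailer, hence the +8. */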
2529 /* Tasklet that processes dma receive buffers */
2530 static void dma_rcv_tasklet (unsigned long data)
2532 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2533 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2534 unsigned int split_left, idx, offset, rescount;
2535 unsigned char tcode;
2536 int length, bytes_left, ack;
2537 unsigned long flags;
2538 quadlet_t *buf_ptr;
2539 char *split_ptr;
2540 char msg[256];
2542 spin_lock_irqsave(&d->lock, flags);
2544 idx = d->buf_ind;
2545 offset = d->buf_offset;
2546 buf_ptr = d->buf_cpu[idx] + offset/4;
2548 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2549 bytes_left = d->buf_size - rescount - offset;
2551 while (bytes_left > 0) {
2552 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2554 /* packet_length() will return < 4 for an error */
2555 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2557 if (length < 4) { /* something is wrong */
2558 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2559 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2560 d->ctx, length);
2561 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2562 spin_unlock_irqrestore(&d->lock, flags);
2563 return;
2566 /* The first case is where we have a packet that crosses
2567 * over more than one descriptor. The next case is where
2568 * it's all in the first descriptor. */
2569 if ((offset + length) > d->buf_size) {
2570 DBGMSG(ohci->id,"Split packet rcv'd");
2571 if (length > d->split_buf_size) {
2572 ohci1394_stop_context(ohci, d->ctrlClear,
2573 "Split packet size exceeded");
2574 d->buf_ind = idx;
2575 d->buf_offset = offset;
2576 spin_unlock_irqrestore(&d->lock, flags);
2577 return;
2580 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2581 == d->buf_size) {
2582 /* Other part of packet not written yet.
2583 * This should never happen; in any case
2584 * we will pick it up on the next call. */
2585 PRINT(KERN_INFO, ohci->id,
2586 "Got only half a packet!");
2587 d->buf_ind = idx;
2588 d->buf_offset = offset;
2589 spin_unlock_irqrestore(&d->lock, flags);
2590 return;
2593 split_left = length;
2594 split_ptr = (char *)d->spb;
2595 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2596 split_left -= d->buf_size-offset;
2597 split_ptr += d->buf_size-offset;
2598 insert_dma_buffer(d, idx);
2599 idx = (idx+1) % d->num_desc;
2600 buf_ptr = d->buf_cpu[idx];
2601 offset=0;
2603 while (split_left >= d->buf_size) {
2604 memcpy(split_ptr,buf_ptr,d->buf_size);
2605 split_ptr += d->buf_size;
2606 split_left -= d->buf_size;
2607 insert_dma_buffer(d, idx);
2608 idx = (idx+1) % d->num_desc;
2609 buf_ptr = d->buf_cpu[idx];
2612 if (split_left > 0) {
2613 memcpy(split_ptr, buf_ptr, split_left);
2614 offset = split_left;
2615 buf_ptr += offset/4;
2617 } else {
2618 DBGMSG(ohci->id,"Single packet rcv'd");
2619 memcpy(d->spb, buf_ptr, length);
2620 offset += length;
2621 buf_ptr += length/4;
2622 if (offset==d->buf_size) {
2623 insert_dma_buffer(d, idx);
2624 idx = (idx+1) % d->num_desc;
2625 buf_ptr = d->buf_cpu[idx];
2626 offset=0;
2630 /* One phy packet arrives in the async descriptor after
2631 * each bus reset. We always ignore it. */
2632 if (tcode != OHCI1394_TCODE_PHY) {
2633 if (!ohci->no_swap_incoming)
2634 packet_swab(d->spb, tcode);
2635 DBGMSG(ohci->id, "Packet received from node"
2636 " %d ack=0x%02X spd=%d tcode=0x%X"
2637 " length=%d ctx=%d tlabel=%d",
2638 (d->spb[1]>>16)&0x3f,
2639 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2640 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2641 tcode, length, d->ctx,
2642 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>10)&0x3f);
2644 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2645 == 0x11) ? 1 : 0;
2647 hpsb_packet_received(ohci->host, d->spb,
2648 length-4, ack);
2650 #ifdef OHCI1394_DEBUG
2651 else
2652 PRINT (KERN_DEBUG, ohci->id, "Got phy packet ctx=%d ... discarded",
2653 d->ctx);
2654 #endif
2656 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2658 bytes_left = d->buf_size - rescount - offset;
2662 d->buf_ind = idx;
2663 d->buf_offset = offset;
2665 spin_unlock_irqrestore(&d->lock, flags);
2668 /* Bottom half that processes sent packets */
2669 static void dma_trm_tasklet (unsigned long data)
2671 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2672 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2673 struct hpsb_packet *packet;
2674 unsigned long flags;
2675 u32 status, ack;
2676 size_t datasize;
2678 spin_lock_irqsave(&d->lock, flags);
2680 while (!list_empty(&d->fifo_list)) {
2681 packet = driver_packet(d->fifo_list.next);
2682 datasize = packet->data_size;
2683 if (datasize && packet->type != hpsb_raw)
2684 status = le32_to_cpu(
2685 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2686 else
2687 status = le32_to_cpu(
2688 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2690 if (status == 0)
2691 /* this packet hasn't been sent yet */
2692 break;
2694 #ifdef OHCI1394_DEBUG
2695 if (datasize)
2696 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2697 DBGMSG(ohci->id,
2698 "Stream packet sent to channel %d tcode=0x%X "
2699 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2700 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2701 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2702 status&0x1f, (status>>5)&0x3,
2703 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2704 d->ctx);
2705 else
2706 DBGMSG(ohci->id,
2707 "Packet sent to node %d tcode=0x%X tLabel="
2708 "0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
2709 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2710 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2711 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2712 status&0x1f, (status>>5)&0x3,
2713 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2714 d->ctx);
2715 else
2716 DBGMSG(ohci->id,
2717 "Packet sent to node %d tcode=0x%X tLabel="
2718 "0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
2719 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2720 >>16)&0x3f,
2721 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2722 >>4)&0xf,
2723 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2724 >>10)&0x3f,
2725 status&0x1f, (status>>5)&0x3,
2726 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2727 d->ctx);
2728 #endif
2730 if (status & 0x10) {
2731 ack = status & 0xf;
2732 } else {
2733 switch (status & 0x1f) {
2734 case EVT_NO_STATUS: /* that should never happen */
2735 case EVT_RESERVED_A: /* that should never happen */
2736 case EVT_LONG_PACKET: /* that should never happen */
2737 PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
2738 ack = ACKX_SEND_ERROR;
2739 break;
2740 case EVT_MISSING_ACK:
2741 ack = ACKX_TIMEOUT;
2742 break;
2743 case EVT_UNDERRUN:
2744 ack = ACKX_SEND_ERROR;
2745 break;
2746 case EVT_OVERRUN: /* that should never happen */
2747 PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
2748 ack = ACKX_SEND_ERROR;
2749 break;
2750 case EVT_DESCRIPTOR_READ:
2751 case EVT_DATA_READ:
2752 case EVT_DATA_WRITE:
2753 ack = ACKX_SEND_ERROR;
2754 break;
2755 case EVT_BUS_RESET: /* that should never happen */
2756 PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
2757 ack = ACKX_SEND_ERROR;
2758 break;
2759 case EVT_TIMEOUT:
2760 ack = ACKX_TIMEOUT;
2761 break;
2762 case EVT_TCODE_ERR:
2763 ack = ACKX_SEND_ERROR;
2764 break;
2765 case EVT_RESERVED_B: /* that should never happen */
2766 case EVT_RESERVED_C: /* that should never happen */
2767 PRINT(KERN_WARNING, ohci->id, "Received OHCI evt_* error 0x%x", status & 0x1f);
2768 ack = ACKX_SEND_ERROR;
2769 break;
2770 case EVT_UNKNOWN:
2771 case EVT_FLUSHED:
2772 ack = ACKX_SEND_ERROR;
2773 break;
2774 default:
2775 PRINT(KERN_ERR, ohci->id, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2776 ack = ACKX_SEND_ERROR;
2777 BUG();
2781 list_del(&packet->driver_list);
2782 hpsb_packet_sent(ohci->host, packet, ack);
2784 if (datasize) {
2785 pci_unmap_single(ohci->dev,
2786 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2787 datasize, PCI_DMA_TODEVICE);
2788 OHCI_DMA_FREE("single Xmit data packet");
2791 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2792 d->free_prgs++;
2795 dma_trm_flush(ohci, d);
2797 spin_unlock_irqrestore(&d->lock, flags);
2800 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2802 int i;
2804 if (d->ohci == NULL)
2805 return;
2807 DBGMSG(d->ohci->id, "Freeing dma_rcv_ctx %d", d->ctx);
2809 if (d->ctrlClear) {
2810 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2812 if (d->type == DMA_CTX_ISO) {
2813 /* disable interrupts */
2814 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2815 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2816 } else {
2817 tasklet_kill(&d->task);
2821 if (d->buf_cpu) {
2822 for (i=0; i<d->num_desc; i++)
2823 if (d->buf_cpu[i] && d->buf_bus[i]) {
2824 pci_free_consistent(
2825 d->ohci->dev, d->buf_size,
2826 d->buf_cpu[i], d->buf_bus[i]);
2827 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2829 kfree(d->buf_cpu);
2830 kfree(d->buf_bus);
2832 if (d->prg_cpu) {
2833 for (i=0; i<d->num_desc; i++)
2834 if (d->prg_cpu[i] && d->prg_bus[i]) {
2835 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2836 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2838 if (d->prg_pool) pci_pool_destroy(d->prg_pool);
2839 OHCI_DMA_FREE("dma_rcv prg pool");
2840 kfree(d->prg_cpu);
2841 kfree(d->prg_bus);
2843 kfree(d->spb);
2845 /* Mark this context as freed. */
2846 d->ohci = NULL;
2849 static int
2850 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2851 enum context_type type, int ctx, int num_desc,
2852 int buf_size, int split_buf_size, int context_base)
2854 int i;
2856 d->ohci = ohci;
2857 d->type = type;
2858 d->ctx = ctx;
2860 d->num_desc = num_desc;
2861 d->buf_size = buf_size;
2862 d->split_buf_size = split_buf_size;
2864 d->ctrlSet = 0;
2865 d->ctrlClear = 0;
2866 d->cmdPtr = 0;
2868 d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_KERNEL);
2869 d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
2871 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2872 PRINT(KERN_ERR, ohci->id, "Failed to allocate dma buffer");
2873 free_dma_rcv_ctx(d);
2874 return -ENOMEM;
2876 memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2877 memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2879 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2880 GFP_KERNEL);
2881 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
2883 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2884 PRINT(KERN_ERR, ohci->id, "Failed to allocate dma prg");
2885 free_dma_rcv_ctx(d);
2886 return -ENOMEM;
2888 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2889 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2891 d->spb = kmalloc(d->split_buf_size, GFP_KERNEL);
2893 if (d->spb == NULL) {
2894 PRINT(KERN_ERR, ohci->id, "Failed to allocate split buffer");
2895 free_dma_rcv_ctx(d);
2896 return -ENOMEM;
2899 d->prg_pool = pci_pool_create("ohci1394 rcv prg", ohci->dev,
2900 sizeof(struct dma_cmd), 4, 0);
if (d->prg_pool == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate dma prg pool");
free_dma_rcv_ctx(d);
return -ENOMEM;
}
2901 OHCI_DMA_ALLOC("dma_rcv prg pool");
2903 for (i=0; i<d->num_desc; i++) {
2904 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2905 d->buf_size,
2906 d->buf_bus+i);
2907 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
2909 if (d->buf_cpu[i] != NULL) {
2910 memset(d->buf_cpu[i], 0, d->buf_size);
2911 } else {
2912 PRINT(KERN_ERR, ohci->id,
2913 "Failed to allocate dma buffer");
2914 free_dma_rcv_ctx(d);
2915 return -ENOMEM;
2918 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
2919 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
2921 if (d->prg_cpu[i] != NULL) {
2922 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2923 } else {
2924 PRINT(KERN_ERR, ohci->id,
2925 "Failed to allocate dma prg");
2926 free_dma_rcv_ctx(d);
2927 return -ENOMEM;
2931 spin_lock_init(&d->lock);
2933 if (type == DMA_CTX_ISO) {
2934 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
2935 OHCI_ISO_MULTICHANNEL_RECEIVE,
2936 dma_rcv_tasklet, (unsigned long) d);
2937 if (ohci1394_register_iso_tasklet(ohci,
2938 &ohci->ir_legacy_tasklet) < 0) {
2939 PRINT(KERN_ERR, ohci->id, "No IR DMA context available");
2940 free_dma_rcv_ctx(d);
2941 return -EBUSY;
2944 /* the IR context can be assigned to any DMA context
2945 * by ohci1394_register_iso_tasklet */
2946 d->ctx = ohci->ir_legacy_tasklet.context;
2947 d->ctrlSet = OHCI1394_IsoRcvContextControlSet + 32*d->ctx;
2948 d->ctrlClear = OHCI1394_IsoRcvContextControlClear + 32*d->ctx;
2949 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
2950 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
2951 } else {
2952 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2953 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2954 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2956 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
2959 return 0;
2962 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
2964 int i;
2966 if (d->ohci == NULL)
2967 return;
2969 DBGMSG(d->ohci->id, "Freeing dma_trm_ctx %d", d->ctx);
2971 if (d->ctrlClear) {
2972 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2974 if (d->type == DMA_CTX_ISO) {
2975 /* disable interrupts */
2976 reg_write(d->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << d->ctx);
2977 ohci1394_unregister_iso_tasklet(d->ohci,
2978 &d->ohci->it_legacy_tasklet);
2979 } else {
2980 tasklet_kill(&d->task);
2984 if (d->prg_cpu) {
2985 for (i=0; i<d->num_desc; i++)
2986 if (d->prg_cpu[i] && d->prg_bus[i]) {
2987 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2988 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
2990 if (d->prg_pool) pci_pool_destroy(d->prg_pool);
2991 OHCI_DMA_FREE("dma_trm prg pool");
2992 kfree(d->prg_cpu);
2993 kfree(d->prg_bus);
2996 /* Mark this context as freed. */
2997 d->ohci = NULL;
3000 static int
3001 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3002 enum context_type type, int ctx, int num_desc,
3003 int context_base)
3005 int i;
3007 d->ohci = ohci;
3008 d->type = type;
3009 d->ctx = ctx;
3010 d->num_desc = num_desc;
3011 d->ctrlSet = 0;
3012 d->ctrlClear = 0;
3013 d->cmdPtr = 0;
3015 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3016 GFP_KERNEL);
3017 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3019 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3020 PRINT(KERN_ERR, ohci->id, "Failed to allocate at dma prg");
3021 free_dma_trm_ctx(d);
3022 return -ENOMEM;
3024 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3025 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3027 d->prg_pool = pci_pool_create("ohci1394 trm prg", ohci->dev,
3028 sizeof(struct at_dma_prg), 4, 0);
if (d->prg_pool == NULL) {
PRINT(KERN_ERR, ohci->id, "Failed to allocate at dma prg pool");
free_dma_trm_ctx(d);
return -ENOMEM;
}
3029 OHCI_DMA_ALLOC("dma_trm prg pool");
3031 for (i = 0; i < d->num_desc; i++) {
3032 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3033 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3035 if (d->prg_cpu[i] != NULL) {
3036 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3037 } else {
3038 PRINT(KERN_ERR, ohci->id,
3039 "Failed to allocate at dma prg");
3040 free_dma_trm_ctx(d);
3041 return -ENOMEM;
3045 spin_lock_init(&d->lock);
3047 /* initialize tasklet */
3048 if (type == DMA_CTX_ISO) {
3049 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3050 dma_trm_tasklet, (unsigned long) d);
3051 if (ohci1394_register_iso_tasklet(ohci,
3052 &ohci->it_legacy_tasklet) < 0) {
3053 PRINT(KERN_ERR, ohci->id, "No IT DMA context available");
3054 free_dma_trm_ctx(d);
3055 return -EBUSY;
3058 /* IT can be assigned to any context by register_iso_tasklet */
3059 d->ctx = ohci->it_legacy_tasklet.context;
3060 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3061 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3062 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3063 } else {
3064 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3065 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3066 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3067 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3070 return 0;
3073 static u16 ohci_crc16 (u32 *ptr, int length)
3075 int shift;
3076 u32 crc, sum, data;
3078 crc = 0;
3079 for (; length > 0; length--) {
3080 data = be32_to_cpu(*ptr++);
3081 for (shift = 28; shift >= 0; shift -= 4) {
3082 sum = ((crc >> 12) ^ (data >> shift)) & 0x000f;
3083 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
3085 crc &= 0xffff;
3087 return crc;
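/* This is the IEEE 1212 CRC-16 (generator x^16 + x^12 + x^5 + 1,
 * the CCITT polynomial) computed four bits at a time over
 * big-endian quadlets; the (sum << 12) ^ (sum << 5) ^ sum term is
 * that polynomial applied to each nibble. */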
3090 /* Config ROM macro implementation influenced by NetBSD OHCI driver */
3092 struct config_rom_unit {
3093 u32 *start;
3094 u32 *refer;
3095 int length;
3096 int refunit;
3099 struct config_rom_ptr {
3100 u32 *data;
3101 int unitnum;
3102 struct config_rom_unit unitdir[10];
3105 #define cf_put_1quad(cr, q) (((cr)->data++)[0] = cpu_to_be32(q))
3107 #define cf_put_4bytes(cr, b1, b2, b3, b4) \
3108 (((cr)->data++)[0] = cpu_to_be32(((b1) << 24) | ((b2) << 16) | ((b3) << 8) | (b4)))
3110 #define cf_put_keyval(cr, key, val) (((cr)->data++)[0] = cpu_to_be32(((key) << 24) | (val)))
3112 static inline void cf_put_str(struct config_rom_ptr *cr, const char *str)
3114 int t;
3115 char fourb[4];
3117 while (str[0]) {
3118 memset(fourb, 0, 4);
3119 for (t = 0; t < 4 && str[t]; t++)
3120 fourb[t] = str[t];
3121 cf_put_4bytes(cr, fourb[0], fourb[1], fourb[2], fourb[3]);
3122 str += strlen(str) < 4 ? strlen(str) : 4;
3124 return;
3127 static inline void cf_put_crc16(struct config_rom_ptr *cr, int unit)
3129 *cr->unitdir[unit].start =
3130 cpu_to_be32((cr->unitdir[unit].length << 16) |
3131 ohci_crc16(cr->unitdir[unit].start + 1,
3132 cr->unitdir[unit].length));
3135 static inline void cf_unit_begin(struct config_rom_ptr *cr, int unit)
3137 if (cr->unitdir[unit].refer != NULL) {
3138 *cr->unitdir[unit].refer |=
3139 cpu_to_be32 (cr->data - cr->unitdir[unit].refer);
3140 cf_put_crc16(cr, cr->unitdir[unit].refunit);
3142 cr->unitnum = unit;
3143 cr->unitdir[unit].start = cr->data++;
3146 static inline void cf_put_refer(struct config_rom_ptr *cr, char key, int unit)
3148 cr->unitdir[unit].refer = cr->data;
3149 cr->unitdir[unit].refunit = cr->unitnum;
3150 (cr->data++)[0] = cpu_to_be32(key << 24);
3153 static inline void cf_unit_end(struct config_rom_ptr *cr)
3155 cr->unitdir[cr->unitnum].length = cr->data -
3156 (cr->unitdir[cr->unitnum].start + 1);
3157 cf_put_crc16(cr, cr->unitnum);
3160 /* End of NetBSD derived code. */
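/* The ROM built below looks like this, in quadlets:
 * 0 bus info block header (info_length, crc_length, crc16)
 * 1-4 "1394" bus name, BusOptions, GUID hi, GUID lo
 * 5 root directory header (length, crc16)
 * 6-8 vendor ID (key 0x03), textual leaf pointer (key 0x81),
 * node capabilities (key 0x0c)
 * 9- textual descriptor leaf holding "Linux OHCI-1394"
 * cf_unit_end() back-patches each unit's length and CRC once the
 * unit's contents are in place. */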
3162 static void ohci_init_config_rom(struct ti_ohci *ohci)
3164 struct config_rom_ptr cr;
3166 memset(&cr, 0, sizeof(cr));
3167 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3169 cr.data = ohci->csr_config_rom_cpu;
3171 /* Bus info block */
3172 cf_unit_begin(&cr, 0);
3173 cf_put_1quad(&cr, reg_read(ohci, OHCI1394_BusID));
3174 cf_put_1quad(&cr, reg_read(ohci, OHCI1394_BusOptions));
3175 cf_put_1quad(&cr, reg_read(ohci, OHCI1394_GUIDHi));
3176 cf_put_1quad(&cr, reg_read(ohci, OHCI1394_GUIDLo));
3177 cf_unit_end(&cr);
3179 DBGMSG(ohci->id, "GUID: %08x:%08x", reg_read(ohci, OHCI1394_GUIDHi),
3180 reg_read(ohci, OHCI1394_GUIDLo));
3182 /* IEEE P1212 suggests the initial ROM header CRC should only
3183 * cover the header itself (and not the entire ROM). Since we
3184 * do this, we can make our bus_info_len the same as the CRC
3185 * length. */
3186 ohci->csr_config_rom_cpu[0] |= cpu_to_be32(
3187 (be32_to_cpu(ohci->csr_config_rom_cpu[0]) & 0x00ff0000) << 8);
3188 reg_write(ohci, OHCI1394_ConfigROMhdr,
3189 be32_to_cpu(ohci->csr_config_rom_cpu[0]));
3191 /* Root directory */
3192 cf_unit_begin(&cr, 1);
3193 /* Vendor ID */
3194 cf_put_keyval(&cr, 0x03, reg_read(ohci,OHCI1394_VendorID) & 0xFFFFFF);
3195 cf_put_refer(&cr, 0x81, 2); /* Textual description unit */
3196 cf_put_keyval(&cr, 0x0c, 0x0083c0); /* Node capabilities */
3197 /* NOTE: Add other unit referers here, and append at bottom */
3198 cf_unit_end(&cr);
3200 /* Textual description - "Linux OHCI-1394" */
3201 cf_unit_begin(&cr, 2);
3202 cf_put_keyval(&cr, 0, 0);
3203 cf_put_1quad(&cr, 0);
3204 cf_put_str(&cr, "Linux OHCI-1394");
3205 cf_unit_end(&cr);
3207 ohci->csr_config_rom_length = cr.data - ohci->csr_config_rom_cpu;
3210 static size_t ohci_get_rom(struct hpsb_host *host, quadlet_t **ptr)
3212 struct ti_ohci *ohci=host->hostdata;
3214 DBGMSG(ohci->id, "request csr_rom address: %p",
3215 ohci->csr_config_rom_cpu);
3217 *ptr = ohci->csr_config_rom_cpu;
3219 return ohci->csr_config_rom_length * 4;
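/* Drive the controller's compare-swap unit for the serial bus CSRs.
 * Per the OHCI spec, csrSel 0 selects BUS_MANAGER_ID, 1 selects
 * BANDWIDTH_AVAILABLE and 2/3 the CHANNELS_AVAILABLE hi/lo
 * registers; we poll csrDone (bit 31) and return the old value,
 * as a lock_compare_swap handler must. */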
3222 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3223 quadlet_t data, quadlet_t compare)
3225 struct ti_ohci *ohci = host->hostdata;
3226 int i;
3228 reg_write(ohci, OHCI1394_CSRData, data);
3229 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3230 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3232 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3233 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3234 break;
3236 mdelay(1);
3239 return reg_read(ohci, OHCI1394_CSRData);
3242 static struct hpsb_host_driver ohci1394_driver = {
3243 .owner = THIS_MODULE,
3244 .name = OHCI1394_DRIVER_NAME,
3245 .get_rom = ohci_get_rom,
3246 .transmit_packet = ohci_transmit,
3247 .devctl = ohci_devctl,
3248 .isoctl = ohci_isoctl,
3249 .hw_csr_reg = ohci_hw_csr_reg,
3254 /***********************************
3255 * PCI Driver Interface functions *
3256 ***********************************/
3258 #define FAIL(err, fmt, args...) \
3259 do { \
3260 PRINT_G(KERN_ERR, fmt , ## args); \
3261 ohci1394_pci_remove(dev); \
3262 return err; \
3263 } while (0)
3265 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3266 const struct pci_device_id *ent)
3268 static unsigned int card_id_counter = 0;
3269 static int version_printed = 0;
3271 struct hpsb_host *host;
3272 struct ti_ohci *ohci; /* shortcut to currently handled device */
3273 unsigned long ohci_base;
3275 if (version_printed++ == 0)
3276 PRINT_G(KERN_INFO, "%s", version);
3278 if (pci_enable_device(dev))
3279 FAIL(-ENXIO, "Failed to enable OHCI hardware %d",
3280 card_id_counter++);
3281 pci_set_master(dev);
3283 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci));
3284 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3286 ohci = host->hostdata;
3287 ohci->id = card_id_counter++;
3288 ohci->dev = dev;
3289 ohci->host = host;
3290 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3291 host->pdev = dev;
3292 pci_set_drvdata(dev, ohci);
3294 /* We don't want hardware swapping */
3295 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3297 /* Some oddball Apple controllers do not order the selfid
3298 * data properly, so we make up for it here. */
3299 #ifndef __LITTLE_ENDIAN
3300 /* XXX: Need a better way to check this. I'm wondering if we can
3301 * read the values of the OHCI1394_PCI_HCI_Control and the
3302 * noByteSwapData registers to see if they were not cleared to
3303 * zero. Should this work? Obviously it's not defined what these
3304 * registers will read when they aren't supported. Bleh! */
3305 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3306 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3307 ohci->no_swap_incoming = 1;
3308 ohci->selfid_swap = 0;
3309 } else
3310 ohci->selfid_swap = 1;
3311 #endif
3313 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3314 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3315 #endif
3317 /* These chipsets require a bit of extra care when checking after
3318 * a busreset. */
3319 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3320 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3321 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3322 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3323 ohci->check_busreset = 1;
3325 /* We hardwire the MMIO length, since some CardBus adaptors
3326 * fail to report the right length. Anyway, the OHCI spec
3327 * clearly says it's 2KB, so this shouldn't be a problem. */
3328 ohci_base = pci_resource_start(dev, 0);
3329 if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
3330 PRINT(KERN_WARNING, ohci->id, "Unexpected PCI resource length of %lx!",
3331 pci_resource_len(dev, 0));
3333 /* Seems PCMCIA handles this internally. Not sure why. Seems
3334 * pretty bogus to force a driver to special-case this. */
3335 #ifndef PCMCIA
3336 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
3337 FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
3338 ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
3339 #endif
3340 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3342 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3343 if (ohci->registers == NULL)
3344 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3345 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3346 DBGMSG(ohci->id, "Remapped memory spaces reg 0x%p", ohci->registers);
3348 /* csr_config rom allocation */
3349 ohci->csr_config_rom_cpu =
3350 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3351 &ohci->csr_config_rom_bus);
3352 OHCI_DMA_ALLOC("consistent csr_config_rom");
3353 if (ohci->csr_config_rom_cpu == NULL)
3354 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3355 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3357 /* self-id dma buffer allocation */
3358 ohci->selfid_buf_cpu =
3359 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3360 &ohci->selfid_buf_bus);
3361 OHCI_DMA_ALLOC("consistent selfid_buf");
3363 if (ohci->selfid_buf_cpu == NULL)
3364 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3365 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3367 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3368 PRINT(KERN_INFO, ohci->id, "SelfID buffer %p is not aligned on "
3369 "an 8KB boundary... may cause problems with some CXD3222 chips",
3370 ohci->selfid_buf_cpu);
3372 /* No self-id errors at startup */
3373 ohci->self_id_errors = 0;
3375 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3376 /* AR DMA request context allocation */
3377 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3378 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3379 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3380 OHCI1394_AsReqRcvContextBase) < 0)
3381 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3383 /* AR DMA response context allocation */
3384 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3385 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3386 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3387 OHCI1394_AsRspRcvContextBase) < 0)
3388 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3390 /* AT DMA request context */
3391 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3392 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3393 OHCI1394_AsReqTrContextBase) < 0)
3394 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3396 /* AT DMA response context */
3397 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3398 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3399 OHCI1394_AsRspTrContextBase) < 0)
3400 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3402 /* Start off with a soft reset, to clear everything to a sane
3403 * state. */
3404 ohci_soft_reset(ohci);
3406 /* Now enable LPS, which we need in order to start accessing
3407 * most of the registers. In fact, on some cards (ALI M5251),
3408 * accessing registers in the SClk domain without LPS enabled
3409 * will lock up the machine. Wait 50 msec to make sure the
3410 * link is fully enabled. */
3411 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3412 mdelay(50);
3414 /* Determine the number of available IR and IT contexts. */
3415 ohci->nb_iso_rcv_ctx =
3416 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3417 DBGMSG(ohci->id, "%d iso receive contexts available",
3418 ohci->nb_iso_rcv_ctx);
3420 ohci->nb_iso_xmit_ctx =
3421 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3422 DBGMSG(ohci->id, "%d iso transmit contexts available",
3423 ohci->nb_iso_xmit_ctx);
3425 /* Set the usage bits for non-existent contexts so they can't
3426 * be allocated */
3427 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3428 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
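/* Example: with 8 transmit contexts, it_ctx_usage starts out as
 * 0xffffff00, so ohci1394_register_iso_tasklet() can only claim
 * bits 0-7 with test_and_set_bit(). */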
3430 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3431 spin_lock_init(&ohci->iso_tasklet_list_lock);
3432 ohci->ISO_channel_usage = 0;
3433 spin_lock_init(&ohci->IR_channel_lock);
3435 /* the IR DMA context is allocated on-demand; mark it inactive */
3436 ohci->ir_legacy_context.ohci = NULL;
3438 /* same for the IT DMA context */
3439 ohci->it_legacy_context.ohci = NULL;
3441 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
3442 OHCI1394_DRIVER_NAME, ohci))
3443 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3445 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3446 ohci_initialize(ohci);
3448 /* Tell the highlevel this host is ready */
3449 hpsb_add_host(host);
3450 ohci->init_state = OHCI_INIT_DONE;
3452 return 0;
3453 #undef FAIL
3456 static void ohci1394_pci_remove(struct pci_dev *pdev)
3458 struct ti_ohci *ohci;
3460 ohci = pci_get_drvdata(pdev);
3461 if (!ohci)
3462 return;
3464 switch (ohci->init_state) {
3465 case OHCI_INIT_DONE:
3466 hpsb_remove_host(ohci->host);
3468 case OHCI_INIT_HAVE_IRQ:
3469 /* Soft reset before we start - this disables
3470 * interrupts and clears linkEnable and LPS. */
3471 ohci_soft_reset(ohci);
3472 free_irq(ohci->dev->irq, ohci);
3474 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3475 /* Free AR dma */
3476 free_dma_rcv_ctx(&ohci->ar_req_context);
3477 free_dma_rcv_ctx(&ohci->ar_resp_context);
3479 /* Free AT dma */
3480 free_dma_trm_ctx(&ohci->at_req_context);
3481 free_dma_trm_ctx(&ohci->at_resp_context);
3483 /* Free IR dma */
3484 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3486 /* Free IT dma */
3487 free_dma_trm_ctx(&ohci->it_legacy_context);
3489 case OHCI_INIT_HAVE_SELFID_BUFFER:
3490 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3491 ohci->selfid_buf_cpu,
3492 ohci->selfid_buf_bus);
3493 OHCI_DMA_FREE("consistent selfid_buf");
3495 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3496 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3497 ohci->csr_config_rom_cpu,
3498 ohci->csr_config_rom_bus);
3499 OHCI_DMA_FREE("consistent csr_config_rom");
3501 case OHCI_INIT_HAVE_IOMAPPING:
3502 iounmap(ohci->registers);
3504 case OHCI_INIT_HAVE_MEM_REGION:
3505 #ifndef PCMCIA
3506 release_mem_region(pci_resource_start(ohci->dev, 0),
3507 OHCI1394_REGISTER_SIZE);
3508 #endif
3510 #ifdef CONFIG_PPC_PMAC
3511 /* On UniNorth, power down the cable and turn off the chip
3512 * clock when the module is removed to save power on
3513 * laptops. Turning it back ON is done by the arch code when
3514 * pci_enable_device() is called */
3516 struct device_node* of_node;
3518 of_node = pci_device_to_OF_node(ohci->dev);
3519 if (of_node) {
3520 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3521 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3524 #endif /* CONFIG_PPC_PMAC */
3526 case OHCI_INIT_ALLOC_HOST:
3527 pci_set_drvdata(ohci->dev, NULL);
3528 hpsb_unref_host(ohci->host);
3533 #ifdef CONFIG_PM
3534 static int ohci1394_pci_resume (struct pci_dev *dev)
3536 pci_enable_device(dev);
3537 return 0;
3539 #endif
3542 #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
3544 static struct pci_device_id ohci1394_pci_tbl[] __devinitdata = {
3546 .class = PCI_CLASS_FIREWIRE_OHCI,
3547 .class_mask = PCI_ANY_ID,
3548 .vendor = PCI_ANY_ID,
3549 .device = PCI_ANY_ID,
3550 .subvendor = PCI_ANY_ID,
3551 .subdevice = PCI_ANY_ID,
3553 { 0, },
3556 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3558 static struct pci_driver ohci1394_pci_driver = {
3559 .name = OHCI1394_DRIVER_NAME,
3560 .id_table = ohci1394_pci_tbl,
3561 .probe = ohci1394_pci_probe,
3562 .remove = ohci1394_pci_remove,
3564 #ifdef CONFIG_PM
3565 .resume = ohci1394_pci_resume,
3566 #endif /* PM */
3571 /***********************************
3572 * OHCI1394 Video Interface *
3573 ***********************************/
3575 /* essentially the only purpose of this code is to allow another
3576 module to hook into ohci's interrupt handler */
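/* ContextControl bits used here and throughout the driver: run is
 * bit 15 (0x8000), wake bit 12 (0x1000), dead bit 11 (0x800) and
 * active bit 10 (0x400). Stopping a context means clearing run,
 * then polling until active drops. */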
3578 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3580 int i=0;
3582 /* stop the channel program if it's still running */
3583 reg_write(ohci, reg, 0x8000);
3585 /* Wait until it effectively stops */
3586 while (reg_read(ohci, reg) & 0x400) {
3587 i++;
3588 if (i>5000) {
3589 PRINT(KERN_ERR, ohci->id,
3590 "Runaway loop while stopping context: %s...", msg ? msg : "");
3591 return 1;
3594 mb();
3595 udelay(10);
3597 if (msg) PRINT(KERN_ERR, ohci->id, "%s: dma prg stopped", msg);
3598 return 0;
3601 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3602 void (*func)(unsigned long), unsigned long data)
3604 tasklet_init(&tasklet->tasklet, func, data);
3605 tasklet->type = type;
3606 /* We init the tasklet->link field, so we can list_del() it
3607 * without worrying whether it was added to the list or not. */
3608 INIT_LIST_HEAD(&tasklet->link);
3611 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3612 struct ohci1394_iso_tasklet *tasklet)
3614 unsigned long flags, *usage;
3615 int n, i, r = -EBUSY;
3617 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3618 n = ohci->nb_iso_xmit_ctx;
3619 usage = &ohci->it_ctx_usage;
3621 else {
3622 n = ohci->nb_iso_rcv_ctx;
3623 usage = &ohci->ir_ctx_usage;
3625 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3626 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3627 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3628 return r;
3633 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3635 for (i = 0; i < n; i++)
3636 if (!test_and_set_bit(i, usage)) {
3637 tasklet->context = i;
3638 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3639 r = 0;
3640 break;
3643 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3645 return r;
3648 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3649 struct ohci1394_iso_tasklet *tasklet)
3651 unsigned long flags;
3653 tasklet_kill(&tasklet->tasklet);
3655 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3657 if (tasklet->type == OHCI_ISO_TRANSMIT)
3658 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3659 else {
3660 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3662 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3663 clear_bit(0, &ohci->ir_multichannel_used);
3667 list_del(&tasklet->link);
3669 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3672 EXPORT_SYMBOL(ohci1394_stop_context);
3673 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3674 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3675 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3678 /***********************************
3679 * General module initialization *
3680 ***********************************/
3682 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3683 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3684 MODULE_LICENSE("GPL");
3686 static void __exit ohci1394_cleanup (void)
3688 pci_unregister_driver(&ohci1394_pci_driver);
3691 static int __init ohci1394_init(void)
3693 return pci_module_init(&ohci1394_pci_driver);
3696 module_init(ohci1394_init);
3697 module_exit(ohci1394_cleanup);