/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                        Gord Peters <GordPeters@smarttech.com>
 *              2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 */

/*
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionalities
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif

#include "ieee1394.h"
#include "ieee1394_types.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...) do {} while (0)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0444);
MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);
#ifndef __LITTLE_ENDIAN
static const size_t hdr_sizes[] = {
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* reserved */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
		/* rest is reserved or link-internal */
};

static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
{
	size_t size;

	if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
		return;

	size = hdr_sizes[tcode];

	while (size--)
		data[size] = le32_to_cpu(data[size]);
}
#else
#define header_le32_to_cpu(w,x) do {} while (0)
#endif /* !LITTLE_ENDIAN */
/***********************************
 * IEEE-1394 functionality section *
 ***********************************/
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}
static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return;
}
/* ORs our value into the current value */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
	u8 old;

	old = get_phy_reg (ohci, addr);
	old |= data;
	set_phy_reg (ohci, addr, old);
}
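
/*
 * Usage sketch (drawn from handle_selfid() below, not an additional API):
 * OR-ing 0x40 into PHY register 1 sets the IBR bit and thereby initiates
 * a bus reset:
 *
 *	set_phy_reg_mask(ohci, 1, 0x40);
 */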
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
			  int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count = reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors < OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");
}
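
/*
 * Background for the q0 == ~q1 check above: each self-ID quadlet is
 * transmitted on the bus together with its bitwise complement, so a valid
 * pair satisfies q0 == ~q1.  Bits 29..24 of q0 carry the sender's physical
 * ID, which is why (q0 & 0x3f000000) >> 24 is compared to phyid.
 */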
static void ohci_soft_reset(struct ti_ohci *ohci) {
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;

		mdelay(1);
	}
	DBGMSG ("Soft reset finished");
}
/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i = 0; i < d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}
/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	return hweight32(tmp);
}
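
/*
 * Worked example (illustrative): writing 0xffffffff to an Iso*IntMaskSet
 * register leaves only the bits of implemented contexts set, so a
 * read-back of e.g. 0x0000000f means four contexts are available
 * (hweight32(0x0000000f) == 4).
 */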
/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
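
	/*
	 * Worked example: bits 15..12 of BusOptions hold max_rec, and the
	 * formula above yields 2^(max_rec+1) bytes - e.g. max_rec = 9 gives
	 * 1 << 10 = 1024.  The serial EEPROM sanity check further down
	 * accepts only the range 512..4096 (max_rec 8..11).
	 */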

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound,
		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
	      "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
	      (unsigned long long)pci_resource_start(ohci->dev, 0),
	      (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to set max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));
650 cycleTimer
, d
->prg_cpu
[idx
]->begin
.status
);
652 d
->prg_cpu
[idx
]->begin
.status
= 0;
654 if ( (packet
->type
== hpsb_async
) || (packet
->type
== hpsb_raw
) ) {
656 if (packet
->type
== hpsb_raw
) {
657 d
->prg_cpu
[idx
]->data
[0] = cpu_to_le32(OHCI1394_TCODE_PHY
<<4);
658 d
->prg_cpu
[idx
]->data
[1] = cpu_to_le32(packet
->header
[0]);
659 d
->prg_cpu
[idx
]->data
[2] = cpu_to_le32(packet
->header
[1]);
661 d
->prg_cpu
[idx
]->data
[0] = packet
->speed_code
<<16 |
662 (packet
->header
[0] & 0xFFFF);
664 if (packet
->tcode
== TCODE_ISO_DATA
) {
665 /* Sending an async stream packet */
666 d
->prg_cpu
[idx
]->data
[1] = packet
->header
[0] & 0xFFFF0000;
668 /* Sending a normal async request or response */
669 d
->prg_cpu
[idx
]->data
[1] =
670 (packet
->header
[1] & 0xFFFF) |
671 (packet
->header
[0] & 0xFFFF0000);
672 d
->prg_cpu
[idx
]->data
[2] = packet
->header
[2];
673 d
->prg_cpu
[idx
]->data
[3] = packet
->header
[3];
675 header_le32_to_cpu(d
->prg_cpu
[idx
]->data
, packet
->tcode
);
678 if (packet
->data_size
) { /* block transmit */
679 if (packet
->tcode
== TCODE_STREAM_DATA
){
680 d
->prg_cpu
[idx
]->begin
.control
=
681 cpu_to_le32(DMA_CTL_OUTPUT_MORE
|
682 DMA_CTL_IMMEDIATE
| 0x8);
684 d
->prg_cpu
[idx
]->begin
.control
=
685 cpu_to_le32(DMA_CTL_OUTPUT_MORE
|
686 DMA_CTL_IMMEDIATE
| 0x10);
688 d
->prg_cpu
[idx
]->end
.control
=
689 cpu_to_le32(DMA_CTL_OUTPUT_LAST
|
694 * Check that the packet data buffer
695 * does not cross a page boundary.
697 * XXX Fix this some day. eth1394 seems to trigger
698 * it, but ignoring it doesn't seem to cause a
702 if (cross_bound((unsigned long)packet
->data
,
703 packet
->data_size
)>0) {
704 /* FIXME: do something about it */
706 "%s: packet data addr: %p size %Zd bytes "
707 "cross page boundary", __func__
,
708 packet
->data
, packet
->data_size
);
711 d
->prg_cpu
[idx
]->end
.address
= cpu_to_le32(
712 pci_map_single(ohci
->dev
, packet
->data
,
716 d
->prg_cpu
[idx
]->end
.branchAddress
= 0;
717 d
->prg_cpu
[idx
]->end
.status
= 0;
718 if (d
->branchAddrPtr
)
719 *(d
->branchAddrPtr
) =
720 cpu_to_le32(d
->prg_bus
[idx
] | 0x3);
722 &(d
->prg_cpu
[idx
]->end
.branchAddress
);
723 } else { /* quadlet transmit */
724 if (packet
->type
== hpsb_raw
)
725 d
->prg_cpu
[idx
]->begin
.control
=
726 cpu_to_le32(DMA_CTL_OUTPUT_LAST
|
730 (packet
->header_size
+ 4));
732 d
->prg_cpu
[idx
]->begin
.control
=
733 cpu_to_le32(DMA_CTL_OUTPUT_LAST
|
737 packet
->header_size
);
739 if (d
->branchAddrPtr
)
740 *(d
->branchAddrPtr
) =
741 cpu_to_le32(d
->prg_bus
[idx
] | 0x2);
743 &(d
->prg_cpu
[idx
]->begin
.branchAddress
);
746 } else { /* iso packet */
747 d
->prg_cpu
[idx
]->data
[0] = packet
->speed_code
<<16 |
748 (packet
->header
[0] & 0xFFFF);
749 d
->prg_cpu
[idx
]->data
[1] = packet
->header
[0] & 0xFFFF0000;
750 header_le32_to_cpu(d
->prg_cpu
[idx
]->data
, packet
->tcode
);
752 d
->prg_cpu
[idx
]->begin
.control
=
753 cpu_to_le32(DMA_CTL_OUTPUT_MORE
|
754 DMA_CTL_IMMEDIATE
| 0x8);
755 d
->prg_cpu
[idx
]->end
.control
=
756 cpu_to_le32(DMA_CTL_OUTPUT_LAST
|
761 d
->prg_cpu
[idx
]->end
.address
= cpu_to_le32(
762 pci_map_single(ohci
->dev
, packet
->data
,
763 packet
->data_size
, PCI_DMA_TODEVICE
));
765 d
->prg_cpu
[idx
]->end
.branchAddress
= 0;
766 d
->prg_cpu
[idx
]->end
.status
= 0;
767 DBGMSG("Iso xmit context info: header[%08x %08x]\n"
768 " begin=%08x %08x %08x %08x\n"
769 " %08x %08x %08x %08x\n"
770 " end =%08x %08x %08x %08x",
771 d
->prg_cpu
[idx
]->data
[0], d
->prg_cpu
[idx
]->data
[1],
772 d
->prg_cpu
[idx
]->begin
.control
,
773 d
->prg_cpu
[idx
]->begin
.address
,
774 d
->prg_cpu
[idx
]->begin
.branchAddress
,
775 d
->prg_cpu
[idx
]->begin
.status
,
776 d
->prg_cpu
[idx
]->data
[0],
777 d
->prg_cpu
[idx
]->data
[1],
778 d
->prg_cpu
[idx
]->data
[2],
779 d
->prg_cpu
[idx
]->data
[3],
780 d
->prg_cpu
[idx
]->end
.control
,
781 d
->prg_cpu
[idx
]->end
.address
,
782 d
->prg_cpu
[idx
]->end
.branchAddress
,
783 d
->prg_cpu
[idx
]->end
.status
);
784 if (d
->branchAddrPtr
)
785 *(d
->branchAddrPtr
) = cpu_to_le32(d
->prg_bus
[idx
] | 0x3);
786 d
->branchAddrPtr
= &(d
->prg_cpu
[idx
]->end
.branchAddress
);
790 /* queue the packet in the appropriate context queue */
791 list_add_tail(&packet
->driver_list
, &d
->fifo_list
);
792 d
->prg_ind
= (d
->prg_ind
+ 1) % d
->num_desc
;
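
/*
 * Note on the Z values ORed into the branch addresses above: the low four
 * bits of a branch address hold the number of 16-byte descriptors in the
 * next block - 0x2 for a header-only (quadlet) transmit, 0x3 when a
 * payload descriptor follows the immediate header.
 */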
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}
}
/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0, phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}
/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor). Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block. But
  the user API expects all packets to be contiguous. Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/
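
/*
 * Illustrative sketch (not used by the driver): how a packet that wraps
 * from the last block into the first is made contiguous using the
 * reserved guard page.  'ring_len' is nblocks * buf_stride; the guard
 * page starts right after it.  The real logic, including the bound
 * checks, lives in ohci_iso_recv_bufferfill_parse() below.
 */
static inline void iso_recv_unwrap_sketch(unsigned char *buf,
					  unsigned long ring_len,
					  unsigned long pkt_offset,
					  unsigned long pkt_len)
{
	long tail_len = pkt_len - (ring_len - pkt_offset);

	if (tail_len > 0)
		/* copy the wrapped tail into the guard page, so the packet
		 * is contiguous when read starting at buf + pkt_offset */
		memcpy(buf + ring_len, buf, tail_len);
}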
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						((recv->nblocks+1)/iso->buf_packets);
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		     recv->buf_stride *= 2);
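
		/* e.g. a 64 KB buffer holding 64 packets gives
		   max_packet_size 1024; the loop above settles on
		   buf_stride 1024, the first power of two >= that size */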

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				  sizeof(struct dma_cmd) * recv->nblocks,
				  recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}
/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch)
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
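
/*
 * Note: each branchAddress written above is the bus address of the next
 * descriptor ORed with Z=1, i.e. one 16-byte descriptor per block.  The
 * ring is deliberately left open at the end, so DMA halts there until
 * ohci_iso_recv_release_block() links further blocks back in.
 */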
/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}
static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
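
		/*
		 * e.g. with cycle == 7900 while the timer is in second 5,
		 * seconds becomes 6 and (6 & 3) << 13 supplies the two high
		 * bits, so the comparator fires at cycle 7900 of the *next*
		 * second rather than one that may already have passed.
		 */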
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* ignore out-of-range requests */
	if ((block < 0) || (block > recv->nblocks))
		return;

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}
static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	/* release the memory where the packet was */
	recv->released_bytes += info->total_len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}
static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}
/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}
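
		/* e.g. a 13-byte payload is padded to 16 bytes in the
		   stream, so total_len = 13 + 8 + 3 = 24 bytes */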

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = recv->buf_stride;

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG("ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}
static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int count;
	int wake = 0;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
		} else if (event) {
			PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		/* sync our view of the buffer */
		dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;

			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len,
					recv->buf_stride, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = recv->buf_stride;

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}
static void ohci_iso_recv_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE)
		ohci_iso_recv_bufferfill_task(iso, recv);
	else
		ohci_iso_recv_packetperbuf_task(iso, recv);
}
/***********************************
 * rawiso ISO transmission         *
 ***********************************/

struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;
	struct ohci1394_iso_tasklet task;
	int task_active;
	int last_cycle;

	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};

/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];
	u32 unused[2];
	struct dma_cmd output_last;
};
static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;
	xmit->last_cycle = -1;
	atomic_set(&iso->skips, 0);

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}
1806 "reload the module and reduce xmit bandwidth");
1810 static void ohci_iso_xmit_shutdown(struct hpsb_iso
*iso
)
1812 struct ohci_iso_xmit
*xmit
= iso
->hostdata
;
1814 if (xmit
->task_active
) {
1815 ohci_iso_xmit_stop(iso
);
1816 ohci1394_unregister_iso_tasklet(xmit
->ohci
, &xmit
->task
);
1817 xmit
->task_active
= 0;
1820 dma_prog_region_free(&xmit
->prog
);
1822 iso
->hostdata
= NULL
;
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8  event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		if (xmit->last_cycle > -1) {
			int cycle_diff = cycle - xmit->last_cycle;
			int skip;

			/* unwrap */
			if (cycle_diff < 0) {
				cycle_diff += 8000;
				if (cycle_diff < 0)
					PRINT(KERN_ERR, "bogus cycle diff %d\n",
					      cycle_diff);
			}

			skip = cycle_diff - 1;
			if (skip > 0) {
				DBGMSG("skipped %d cycles without packet loss", skip);
				atomic_add(skip, &iso->skips);
			}
		}
		xmit->last_cycle = cycle;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;
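
	/*
	 * Worked example of the header bytes above: channel 35 (0x23),
	 * tag 1, sy 0 and a 512-byte payload give iso_hdr[0] = 0xA0,
	 * iso_hdr[1] = 0x63, iso_hdr[6] = 0x00, iso_hdr[7] = 0x02.
	 */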

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/*
	 * Link the skip address to this descriptor itself.  This causes a
	 * context to skip a cycle whenever lost cycles or FIFO overruns
	 * occur, without dropping the data at that point.  The application
	 * should then decide whether this is an error condition or not.
	 * Some protocols can deal with this by dropping some rate-matching
	 * padding packets.
	 */
	next->output_more_immediate.branchAddress =
			prev->output_last.branchAddress;

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
{
	switch (cmd) {
	case XMIT_INIT:
		return ohci_iso_xmit_init(iso);
	case XMIT_START:
		return ohci_iso_xmit_start(iso, arg);
	case XMIT_STOP:
		ohci_iso_xmit_stop(iso);
		return 0;
	case XMIT_QUEUE:
		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
	case XMIT_SHUTDOWN:
		ohci_iso_xmit_shutdown(iso);
		return 0;

	case RECV_INIT:
		return ohci_iso_recv_init(iso);
	case RECV_START: {
		int *args = (int*) arg;
		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
	}
	case RECV_STOP:
		ohci_iso_recv_stop(iso);
		return 0;
	case RECV_RELEASE:
		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
		return 0;
	case RECV_FLUSH:
		ohci_iso_recv_task((unsigned long) iso);
		return 0;
	case RECV_SHUTDOWN:
		ohci_iso_recv_shutdown(iso);
		return 0;
	case RECV_LISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 1);
		return 0;
	case RECV_UNLISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 0);
		return 0;
	case RECV_SET_CHANNEL_MASK:
		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
		return 0;

	default:
		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return -EINVAL;
}
/***************************************
 * IEEE-1394 functionality section END *
 ***************************************/

/********************************************************
 * Global stuff (interrupt handler, init/shutdown code) *
 ********************************************************/
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	list_splice_init(&d->fifo_list, &packet_list);
	list_splice_init(&d->pending_list, &packet_list);

	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
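/*
 * Illustrative sketch (not part of the driver): the splice-then-callback
 * pattern dma_trm_reset() uses above.  Entries are stolen onto a private
 * list while the lock is held; the potentially slow per-entry callbacks
 * then run with the lock released.  'example_entry' is a made-up type.
 */
#if 0
struct example_entry {
	struct list_head link;
};

static void example_drain(spinlock_t *lock, struct list_head *src)
{
	LIST_HEAD(tmp);
	struct example_entry *e, *next;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_splice_init(src, &tmp);	/* steal every queued entry */
	spin_unlock_irqrestore(lock, flags);

	list_for_each_entry_safe(e, next, &tmp, link) {
		list_del_init(&e->link);
		/* slow per-entry callback runs here, lock not held */
	}
}
#endif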
static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
				       quadlet_t rx_event,
				       quadlet_t tx_event)
{
	struct ohci1394_iso_tasklet *t;
	unsigned long mask;
	unsigned long flags;

	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);

	list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
		mask = 1 << t->context;

		if (t->type == OHCI_ISO_TRANSMIT) {
			if (tx_event & mask)
				tasklet_schedule(&t->tasklet);
		} else {
			/* OHCI_ISO_RECEIVE or OHCI_ISO_MULTICHANNEL_RECEIVE */
			if (rx_event & mask)
				tasklet_schedule(&t->tasklet);
		}
	}

	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
}
static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
{
	quadlet_t event, node_id;
	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
	struct hpsb_host *host = ohci->host;
	int phyid = -1, isroot = 0;
	unsigned long flags;

	/* Read and clear the interrupt event register.  Don't clear
	 * the busReset event, though. This is done when we get the
	 * selfIDComplete interrupt. */
	spin_lock_irqsave(&ohci->event_lock, flags);
	event = reg_read(ohci, OHCI1394_IntEventClear);
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	spin_unlock_irqrestore(&ohci->event_lock, flags);
	/* If event is ~(u32)0 the cardbus card was ejected.  In this case
	 * we just return, and clean up in the ohci1394_pci_remove
	 * function. */
	if (event == ~(u32) 0) {
		DBGMSG("Device removed.");
		return IRQ_NONE;
	}

	DBGMSG("IntEvent: %08x", event);
	if (event & OHCI1394_unrecoverableError) {
		int ctx;
		PRINT(KERN_ERR, "Unrecoverable error!");

		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));

		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Xmit %d Context died: "
					"ctrl[%08x] cmdptr[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
		}

		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Recv %d Context died: "
					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
		}

		event &= ~OHCI1394_unrecoverableError;
	}
	if (event & OHCI1394_postedWriteErr) {
		PRINT(KERN_ERR, "physical posted write error");
		/* no recovery strategy yet; it would have to involve
		 * the protocol drivers */
		event &= ~OHCI1394_postedWriteErr;
	}
	if (event & OHCI1394_cycleTooLong) {
		if (printk_ratelimit())
			PRINT(KERN_WARNING, "isochronous cycle too long");
		else
			DBGMSG("OHCI1394_cycleTooLong");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_CycleMaster);
		event &= ~OHCI1394_cycleTooLong;
	}
	if (event & OHCI1394_cycleInconsistent) {
		/* We subscribe to the cycleInconsistent event only to
		 * clear the corresponding event bit... otherwise,
		 * isochronous cycleMatch DMA won't work. */
		DBGMSG("OHCI1394_cycleInconsistent");
		event &= ~OHCI1394_cycleInconsistent;
	}
	if (event & OHCI1394_busReset) {
		/* The busReset event bit can't be cleared during the
		 * selfID phase, so we disable busReset interrupts, to
		 * avoid burying the cpu in interrupt requests. */
		spin_lock_irqsave(&ohci->event_lock, flags);
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

		if (ohci->check_busreset) {
			int loop_count = 0;

			udelay(10);

			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

				spin_unlock_irqrestore(&ohci->event_lock, flags);
				udelay(10);
				spin_lock_irqsave(&ohci->event_lock, flags);

				/* The loop counter check is to prevent the driver
				 * from remaining in this state forever. For the
				 * initial bus reset, the loop continues for ever
				 * and the system hangs, until some device is
				 * plugged in or out manually into a port! The
				 * forced reset seems to solve this problem.
				 * This mainly affects nForce2. */
				if (loop_count > 10000) {
					ohci_devctl(host, RESET_BUS, LONG_RESET);
					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
					loop_count = 0;
				}

				loop_count++;
			}
		}
		spin_unlock_irqrestore(&ohci->event_lock, flags);
		if (!host->in_bus_reset) {
			DBGMSG("irq_handler: Bus reset requested");

			/* Subsystem call */
			hpsb_bus_reset(ohci->host);
		}
		event &= ~OHCI1394_busReset;
	}
	if (event & OHCI1394_reqTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_req_context;
		DBGMSG("Got reqTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "reqTxComplete");
		else
			dma_trm_tasklet((unsigned long)d);
			//tasklet_schedule(&d->task);
		event &= ~OHCI1394_reqTxComplete;
	}
	if (event & OHCI1394_respTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_resp_context;
		DBGMSG("Got respTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "respTxComplete");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_respTxComplete;
	}
	if (event & OHCI1394_RQPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_req_context;
		DBGMSG("Got RQPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RQPkt;
	}
	if (event & OHCI1394_RSPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
		DBGMSG("Got RSPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RSPkt;
	}
	if (event & OHCI1394_isochRx) {
		quadlet_t rx_event;

		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
		event &= ~OHCI1394_isochRx;
	}
	if (event & OHCI1394_isochTx) {
		quadlet_t tx_event;

		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
		event &= ~OHCI1394_isochTx;
	}
	if (event & OHCI1394_selfIDComplete) {
		if (host->in_bus_reset) {
			node_id = reg_read(ohci, OHCI1394_NodeID);

			if (!(node_id & 0x80000000)) {
				PRINT(KERN_ERR,
				      "SelfID received, but NodeID invalid "
				      "(probably new bus reset occurred): %08X",
				      node_id);
				goto selfid_not_valid;
			}

			phyid = node_id & 0x0000003f;
			isroot = (node_id & 0x40000000) != 0;

			DBGMSG("SelfID interrupt received "
			       "(phyid %d, %s)", phyid,
			       (isroot ? "root" : "not root"));

			handle_selfid(ohci, host, phyid, isroot);

			/* Clear the bus reset event and re-enable the
			 * busReset interrupt. */
			spin_lock_irqsave(&ohci->event_lock, flags);
			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
			spin_unlock_irqrestore(&ohci->event_lock, flags);

			/* Turn on phys dma reception.
			 *
			 * TODO: Enable some sort of filtering management.
			 */
			reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
				  0xffffffff);
			reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
				  0xffffffff);

			DBGMSG("PhyReqFilter=%08x%08x",
			       reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
			       reg_read(ohci, OHCI1394_PhyReqFilterLoSet));

			hpsb_selfid_complete(host, phyid, isroot);
		} else
			PRINT(KERN_ERR,
			      "SelfID received outside of bus reset sequence");

selfid_not_valid:
		event &= ~OHCI1394_selfIDComplete;
	}
	/* Make sure we handle everything, just in case we accidentally
	 * enabled an interrupt that we didn't write a handler for. */
	if (event)
		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
		      event);

	return IRQ_HANDLED;
}
/* Put the buffer back into the dma context */
static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
{
	struct ti_ohci *ohci = (struct ti_ohci *)(d->ohci);

	DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);

	d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
	d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
	idx = (idx + d->num_desc - 1) % d->num_desc;
	d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);

	/* To avoid a race, ensure 1394 interface hardware sees the inserted
	 * context program descriptors before it sees the wakeup bit set. */
	wmb();

	/* wake up the dma context if necessary */
	if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
		PRINT(KERN_INFO,
		      "Waking dma ctx=%d ... processing is probably too slow",
		      d->ctx);
	}

	/* do this always, to avoid race condition */
	reg_write(ohci, d->ctrlSet, 0x1000);
}
#define cond_le32_to_cpu(data, noswap) \
	(noswap ? data : le32_to_cpu(data))

static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
				   -1, 0, -1, 0, -1, -1, 16, -1};
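/*
 * Illustrative sketch (not part of the driver): how TCODE_SIZE[] is read.
 * The transaction code lives in bits 7..4 of the first header quadlet.
 * A positive entry is a fixed header+trailer size, 0 marks block packets
 * whose length must be taken from the header, and -1 marks invalid tcodes.
 */
#if 0
static int example_fixed_size(quadlet_t first_quadlet, int noswap)
{
	unsigned char tcode = (cond_le32_to_cpu(first_quadlet, noswap) >> 4) & 0xf;

	return TCODE_SIZE[tcode];
}
#endif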
/*
 * Determine the length of a packet in the buffer
 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
 */
static inline int packet_length(struct dma_rcv_ctx *d, int idx,
				quadlet_t *buf_ptr, int offset,
				unsigned char tcode, int noswap)
{
	int length = -1;

	if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
		length = TCODE_SIZE[tcode];
		if (length == 0) {
			if (offset + 12 >= d->buf_size) {
				length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
						[3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
			} else {
				length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
			}
			length += 20;
		}
	} else if (d->type == DMA_CTX_ISO) {
		/* Assumption: buffer fill mode with header/trailer */
		length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
	}

	if (length > 0 && length % 4)
		length += 4 - (length % 4);

	return length;
}
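/*
 * Illustrative sketch (not part of the driver): the final quadlet rounding
 * in packet_length() is the usual round-up-to-a-multiple-of-4 and, for
 * non-negative lengths, is equivalent to the masked form below.
 */
#if 0
static int example_pad_to_quadlet(int length)
{
	return (length + 3) & ~3;	/* same as length += 4 - (length % 4) */
}
#endif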
/* Tasklet that processes dma receive buffers */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx *)data;
	struct ti_ohci *ohci = (struct ti_ohci *)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;
	while (bytes_left > 0) {
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode,
				       ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg, "Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}
		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor. */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call. */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr, buf_ptr, d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset = 0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr, buf_ptr, d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
2583 DBGMSG("Single packet rcv'd");
2584 memcpy(d
->spb
, buf_ptr
, length
);
2586 buf_ptr
+= length
/4;
2587 if (offset
==d
->buf_size
) {
2588 insert_dma_buffer(d
, idx
);
2589 idx
= (idx
+1) % d
->num_desc
;
2590 buf_ptr
= d
->buf_cpu
[idx
];
		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it. */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				header_le32_to_cpu(d->spb, tcode);
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(d->spb[0]>>10)&0x3f);

			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
			       == 0x11) ? 1 : 0;

			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
		bytes_left = d->buf_size - rescount - offset;
	}

	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
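/*
 * Illustrative sketch (not part of the driver): the resCount bookkeeping
 * dma_rcv_tasklet() relies on above.  The controller counts the 16-bit
 * resCount field of a descriptor's status down as it fills the buffer, so
 * the amount of fresh data is buf_size - resCount - <bytes already consumed>.
 */
#if 0
static int example_bytes_ready(__le32 status, int buf_size, int consumed)
{
	int rescount = le32_to_cpu(status) & 0xffff;	/* bytes still free */

	return buf_size - rescount - consumed;
}
#endif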
/* Bottom half that processes sent packets */
static void dma_trm_tasklet (unsigned long data)
{
	struct dma_trm_ctx *d = (struct dma_trm_ctx *)data;
	struct ti_ohci *ohci = (struct ti_ohci *)(d->ohci);
	struct hpsb_packet *packet, *ptmp;
	unsigned long flags;
	u32 status, ack;
	size_t datasize;

	spin_lock_irqsave(&d->lock, flags);

	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
		datasize = packet->data_size;
		if (datasize && packet->type != hpsb_raw)
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->begin.status) >> 16;

		if (status == 0)
			/* this packet hasn't been sent yet */
			break;
#ifdef OHCI1394_DEBUG
		if (datasize) {
			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
				       d->ctx);
			else
				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
				       d->ctx);
		} else
			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
			       "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
					>>16)&0x3f,
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>4)&0xf,
			       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>10)&0x3f,
			       status&0x1f, (status>>5)&0x3,
			       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
			       d->ctx);
#endif
		if (status & 0x10) {
			ack = status & 0xf;
		} else {
			switch (status & 0x1f) {
			case EVT_NO_STATUS: /* that should never happen */
			case EVT_RESERVED_A: /* that should never happen */
			case EVT_LONG_PACKET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_MISSING_ACK:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_UNDERRUN:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_OVERRUN: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_DESCRIPTOR_READ:
			case EVT_DATA_READ:
			case EVT_DATA_WRITE:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_BUS_RESET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_TIMEOUT:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_TCODE_ERR:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_RESERVED_B: /* that should never happen */
			case EVT_RESERVED_C: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_UNKNOWN:
			case EVT_FLUSHED:
				ack = ACKX_SEND_ERROR;
				break;
			default:
				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			}
		}
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ack);

		if (datasize)
			pci_unmap_single(ohci->dev,
					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
					 datasize, PCI_DMA_TODEVICE);

		d->sent_ind = (d->sent_ind+1)%d->num_desc;
		d->free_prgs++;
	}

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);
}
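/*
 * Illustrative sketch (not part of the driver): the xferStatus decoding in
 * dma_trm_tasklet() above.  In the completed descriptor's status, bit 4
 * distinguishes a real IEEE-1394 ack code (set) from an OHCI evt_* error
 * code (clear), and bits 6..5 report the speed the packet went out at.
 */
#if 0
static void example_decode_xferstatus(u32 status)
{
	int is_ack = (status & 0x10) != 0;	/* 1394 ack vs. OHCI evt_* code */
	int code   = status & 0xf;		/* ack code when is_ack is set */
	int speed  = (status >> 5) & 0x3;	/* S100/S200/S400 */

	(void) is_ack; (void) code; (void) speed;
}
#endif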
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
{
	int i;
	struct ti_ohci *ohci = d->ohci;

	if (ohci == NULL)
		return;

	DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);

	if (d->buf_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->buf_cpu[i] && d->buf_bus[i])
				pci_free_consistent(
					ohci->dev, d->buf_size,
					d->buf_cpu[i], d->buf_bus[i]);
		kfree(d->buf_cpu);
		kfree(d->buf_bus);
	}
	if (d->prg_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->prg_cpu[i] && d->prg_bus[i])
				pci_pool_free(d->prg_pool, d->prg_cpu[i],
					      d->prg_bus[i]);
		pci_pool_destroy(d->prg_pool);
		kfree(d->prg_cpu);
		kfree(d->prg_bus);
	}
	kfree(d->spb);

	/* Mark this context as freed. */
	d->ohci = NULL;
}
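/*
 * Note: the error paths in alloc_dma_rcv_ctx() below may call
 * free_dma_rcv_ctx() on a half-built context.  That is safe because the
 * pointer arrays are allocated zeroed and checked before use, and d->ohci
 * doubles as an "already freed" flag.
 */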
static int
alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
		  enum context_type type, int ctx, int num_desc,
		  int buf_size, int split_buf_size, int context_base)
{
	int i, len;
	static int num_allocs;
	static char pool_name[20];

	d->ohci = ohci;
	d->type = type;
	d->ctx = ctx;

	d->num_desc = num_desc;
	d->buf_size = buf_size;
	d->split_buf_size = split_buf_size;

	d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
	d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);

	if (d->buf_cpu == NULL || d->buf_bus == NULL) {
		PRINT(KERN_ERR, "Failed to allocate %s", "DMA buffer");
		free_dma_rcv_ctx(d);
		return -ENOMEM;
	}

	d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
	d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);

	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
		PRINT(KERN_ERR, "Failed to allocate %s", "DMA prg");
		free_dma_rcv_ctx(d);
		return -ENOMEM;
	}

	d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);

	if (d->spb == NULL) {
		PRINT(KERN_ERR, "Failed to allocate %s", "split buffer");
		free_dma_rcv_ctx(d);
		return -ENOMEM;
	}

	len = sprintf(pool_name, "ohci1394_rcv_prg");
	sprintf(pool_name+len, "%d", num_allocs);
	d->prg_pool = pci_pool_create(pool_name, ohci->dev,
				      sizeof(struct dma_cmd), 4, 0);
	if (d->prg_pool == NULL) {
		PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
		free_dma_rcv_ctx(d);
		return -ENOMEM;
	}
	num_allocs++;

	for (i=0; i<d->num_desc; i++) {
		d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
						     d->buf_size,
						     d->buf_bus+i);

		if (d->buf_cpu[i] != NULL) {
			memset(d->buf_cpu[i], 0, d->buf_size);
		} else {
			PRINT(KERN_ERR,
			      "Failed to allocate %s", "DMA buffer");
			free_dma_rcv_ctx(d);
			return -ENOMEM;
		}

		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);

		if (d->prg_cpu[i] != NULL) {
			memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
		} else {
			PRINT(KERN_ERR,
			      "Failed to allocate %s", "DMA prg");
			free_dma_rcv_ctx(d);
			return -ENOMEM;
		}
	}

	spin_lock_init(&d->lock);

	d->ctrlSet = context_base + OHCI1394_ContextControlSet;
	d->ctrlClear = context_base + OHCI1394_ContextControlClear;
	d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;

	tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d);
	return 0;
}
static void free_dma_trm_ctx(struct dma_trm_ctx *d)
{
	int i;
	struct ti_ohci *ohci = d->ohci;

	if (ohci == NULL)
		return;

	DBGMSG("Freeing dma_trm_ctx %d", d->ctx);

	if (d->prg_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->prg_cpu[i] && d->prg_bus[i])
				pci_pool_free(d->prg_pool, d->prg_cpu[i],
					      d->prg_bus[i]);
		pci_pool_destroy(d->prg_pool);
		kfree(d->prg_cpu);
		kfree(d->prg_bus);
	}

	/* Mark this context as freed. */
	d->ohci = NULL;
}
static int
alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
		  enum context_type type, int ctx, int num_desc,
		  int context_base)
{
	int i, len;
	static char pool_name[20];
	static int num_allocs=0;

	d->ohci = ohci;
	d->type = type;
	d->ctx = ctx;
	d->num_desc = num_desc;

	d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
	d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);

	if (d->prg_cpu == NULL || d->prg_bus == NULL) {
		PRINT(KERN_ERR, "Failed to allocate %s", "AT DMA prg");
		free_dma_trm_ctx(d);
		return -ENOMEM;
	}

	len = sprintf(pool_name, "ohci1394_trm_prg");
	sprintf(pool_name+len, "%d", num_allocs);
	d->prg_pool = pci_pool_create(pool_name, ohci->dev,
				      sizeof(struct at_dma_prg), 4, 0);
	if (d->prg_pool == NULL) {
		PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
		free_dma_trm_ctx(d);
		return -ENOMEM;
	}
	num_allocs++;

	for (i = 0; i < d->num_desc; i++) {
		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);

		if (d->prg_cpu[i] != NULL) {
			memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
		} else {
			PRINT(KERN_ERR,
			      "Failed to allocate %s", "AT DMA prg");
			free_dma_trm_ctx(d);
			return -ENOMEM;
		}
	}

	spin_lock_init(&d->lock);

	/* initialize tasklet */
	d->ctrlSet = context_base + OHCI1394_ContextControlSet;
	d->ctrlClear = context_base + OHCI1394_ContextControlClear;
	d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
	tasklet_init(&d->task, dma_trm_tasklet, (unsigned long)d);
	return 0;
}
static void ohci_set_hw_config_rom(struct hpsb_host *host, __be32 *config_rom)
{
	struct ti_ohci *ohci = host->hostdata;

	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));

	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
}
static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
				 quadlet_t data, quadlet_t compare)
{
	struct ti_ohci *ohci = host->hostdata;
	int i;

	reg_write(ohci, OHCI1394_CSRData, data);
	reg_write(ohci, OHCI1394_CSRCompareData, compare);
	reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
			break;

		mdelay(1);
	}

	return reg_read(ohci, OHCI1394_CSRData);
}
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =		ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};

/***********************************
 * PCI Driver Interface functions  *
 ***********************************/
#ifdef CONFIG_PPC_PMAC
static void ohci1394_pmac_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void ohci1394_pmac_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
#define ohci1394_pmac_on(dev)
#define ohci1394_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to currently handled device */
	resource_size_t ohci_base;
	int err = -ENOMEM;

	ohci1394_pmac_on(dev);
	if (pci_enable_device(dev)) {
		PRINT_G(KERN_ERR, "Failed to enable OHCI hardware");
		goto err;
	}
	pci_set_master(dev);

	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
	if (!host) {
		PRINT_G(KERN_ERR, "Failed to allocate %s", "host structure");
		goto err;
	}
	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	pci_set_drvdata(dev, ohci);
	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not order the selfid
	 * properly, so we make up for it here. */
#ifndef __LITTLE_ENDIAN
	/* XXX: Need a better way to check this. I'm wondering if we can
	 * read the values of the OHCI1394_PCI_HCI_Control and the
	 * noByteSwapData registers to see if they were not cleared to
	 * zero. Should this work? Obviously it's not defined what these
	 * registers will read when they aren't supported. Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif

#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a busreset. */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;
	/* We hardwire the MMIO length, since some CardBus adaptors
	 * fail to report the right length. Anyway, the ohci spec
	 * clearly says it's 2kb, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
		      (unsigned long long)pci_resource_len(dev, 0));

	if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
				OHCI1394_DRIVER_NAME)) {
		PRINT_G(KERN_ERR, "MMIO resource (0x%llx - 0x%llx) unavailable",
			(unsigned long long)ohci_base,
			(unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
		goto err;
	}
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		PRINT_G(KERN_ERR, "Failed to remap registers");
		goto err;
	}
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
	/* csr_config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	if (ohci->csr_config_rom_cpu == NULL) {
		PRINT_G(KERN_ERR, "Failed to allocate %s", "buffer config rom");
		goto err;
	}
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				     &ohci->selfid_buf_bus);
	if (ohci->selfid_buf_cpu == NULL) {
		PRINT_G(KERN_ERR, "Failed to allocate %s", "self-ID buffer");
		goto err;
	}
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
		      "8Kb boundary... may cause problems on some CXD3222 chip",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0) {
		PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Req context");
		goto err;
	}
	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0) {
		PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Resp context");
		goto err;
	}
	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0) {
		PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Req context");
		goto err;
	}
	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0) {
		PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Resp context");
		goto err;
	}
	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers. In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine. */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	/* Flush MMIO writes and wait to make sure we have full link enabled. */
	reg_read(ohci, OHCI1394_Version);
	msleep(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
	spin_lock_init(&ohci->IR_channel_lock);

	spin_lock_init(&ohci->event_lock);
	/*
	 * interrupts are disabled, all right, but... due to IRQF_SHARED we
	 * might get called anyway.  We'll see no event, of course, but
	 * we need to get to that "no event", so enough should be initialized
	 * by that point.
	 */
	err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
			  OHCI1394_DRIVER_NAME, ohci);
	if (err) {
		PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
		goto err;
	}
	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Set certain csr values */
	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
	host->csr.cyc_clk_acc = 100;	/* how do we determine clk accuracy? */
	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;

	host->low_addr_space =
		(u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
	if (!host->low_addr_space)
		host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
	host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;

	/* Tell the highlevel this host is ready */
	if (hpsb_add_host(host)) {
		PRINT_G(KERN_ERR, "Failed to register host with highlevel");
		goto err;
	}
	ohci->init_state = OHCI_INIT_DONE;

	return 0;

err:
	ohci1394_pci_remove(dev);
	return err;
}
static void ohci1394_pci_remove(struct pci_dev *dev)
{
	struct ti_ohci *ohci;
	struct device *device;

	ohci = pci_get_drvdata(dev);
	if (!ohci)
		goto out;

	device = get_device(&ohci->host->device);

	switch (ohci->init_state) {
	case OHCI_INIT_DONE:
		hpsb_remove_host(ohci->host);

		/* Clear out BUS Options */
		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
			  0x00ff0000);
		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);

	case OHCI_INIT_HAVE_IRQ:
		/* Clear interrupt registers */
		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

		/* Disable IRM Contender */
		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));

		/* Clear link control register */
		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

		/* Let all other nodes know to ignore us */
		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

		/* Soft reset before we start - this disables
		 * interrupts and clears linkEnable and LPS. */
		ohci_soft_reset(ohci);
		free_irq(dev->irq, ohci);

	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
		/* The ohci_soft_reset() stops all DMA contexts, so we
		 * don't need to do this. */
		free_dma_rcv_ctx(&ohci->ar_req_context);
		free_dma_rcv_ctx(&ohci->ar_resp_context);
		free_dma_trm_ctx(&ohci->at_req_context);
		free_dma_trm_ctx(&ohci->at_resp_context);

	case OHCI_INIT_HAVE_SELFID_BUFFER:
		pci_free_consistent(dev, OHCI1394_SI_DMA_BUF_SIZE,
				    ohci->selfid_buf_cpu,
				    ohci->selfid_buf_bus);

	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
		pci_free_consistent(dev, OHCI_CONFIG_ROM_LEN,
				    ohci->csr_config_rom_cpu,
				    ohci->csr_config_rom_bus);

	case OHCI_INIT_HAVE_IOMAPPING:
		iounmap(ohci->registers);

	case OHCI_INIT_HAVE_MEM_REGION:
		release_mem_region(pci_resource_start(dev, 0),
				   OHCI1394_REGISTER_SIZE);

	case OHCI_INIT_ALLOC_HOST:
		pci_set_drvdata(dev, NULL);
	}

	if (device)
		put_device(device);
out:
	ohci1394_pmac_off(dev);
}
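/*
 * Illustrative sketch (not part of the driver): the init_state ladder that
 * probe and remove share above.  Probe advances ohci->init_state after each
 * resource is acquired; remove switches on it and deliberately falls
 * through, so a probe that failed half-way is unwound by the same code.
 */
#if 0
enum example_state { EX_GOT_A, EX_GOT_B };

static void example_unwind(enum example_state s)
{
	switch (s) {
	case EX_GOT_B:
		/* release resource B */
		/* fall through */
	case EX_GOT_A:
		/* release resource A */
		break;
	}
}
#endif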
#ifdef CONFIG_PM
static int ohci1394_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	int err;
	struct ti_ohci *ohci = pci_get_drvdata(dev);

	if (!ohci) {
		printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
		       OHCI1394_DRIVER_NAME);
		return -ENXIO;
	}
	DBGMSG("suspend called");

	/* Clear the async DMA contexts and stop using the controller */
	hpsb_bus_reset(ohci->host);

	/* See ohci1394_pci_remove() for comments on this sequence */
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
		  0x00ff0000);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
	set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
	ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
	ohci_soft_reset(ohci);

	free_irq(dev->irq, ohci);
	err = pci_save_state(dev);
	if (err) {
		PRINT(KERN_ERR, "pci_save_state failed with %d", err);
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		DBGMSG("pci_set_power_state failed with %d", err);
	ohci1394_pmac_off(dev);

	return 0;
}
static int ohci1394_pci_resume(struct pci_dev *dev)
{
	int err;
	struct ti_ohci *ohci = pci_get_drvdata(dev);

	if (!ohci) {
		printk(KERN_ERR "%s: tried to resume nonexisting host\n",
		       OHCI1394_DRIVER_NAME);
		return -ENXIO;
	}
	DBGMSG("resume called");

	ohci1394_pmac_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
		return err;
	}

	/* See ohci1394_pci_probe() for comments on this sequence */
	ohci_soft_reset(ohci);
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
	reg_read(ohci, OHCI1394_Version);
	msleep(50);

	err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
			  OHCI1394_DRIVER_NAME, ohci);
	if (err) {
		PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
		return err;
	}

	ohci_initialize(ohci);

	hpsb_resume_host(ohci->host);
	return 0;
}
#endif /* CONFIG_PM */
static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class =	PCI_CLASS_SERIAL_FIREWIRE_OHCI,
		.class_mask =	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);

static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,
#ifdef CONFIG_PM
	.resume =	ohci1394_pci_resume,
	.suspend =	ohci1394_pci_suspend,
#endif
};
/***********************************
 * OHCI1394 Video Interface        *
 ***********************************/

/* essentially the only purpose of this code is to allow another
   module to hook into ohci's interrupt handler */

/* returns zero if successful, one if DMA context is locked up */
int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
{
	int i = 0;

	/* stop the channel program if it's still running */
	reg_write(ohci, reg, 0x8000);

	/* Wait until it effectively stops */
	while (reg_read(ohci, reg) & 0x400) {
		i++;
		if (i > 5000) {
			PRINT(KERN_ERR,
			      "Runaway loop while stopping context: %s...",
			      msg ? msg : "");
			return 1;
		}

		mb();
		udelay(10);
	}
	if (msg)
		PRINT(KERN_ERR, "%s: dma prg stopped", msg);
	return 0;
}
void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
			       void (*func)(unsigned long), unsigned long data)
{
	tasklet_init(&tasklet->tasklet, func, data);
	tasklet->type = type;
	/* We init the tasklet->link field, so we can list_del() it
	 * without worrying whether it was added to the list or not. */
	INIT_LIST_HEAD(&tasklet->link);
}
int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
				  struct ohci1394_iso_tasklet *tasklet)
{
	unsigned long flags, *usage;
	int n, i, r = -EBUSY;

	if (tasklet->type == OHCI_ISO_TRANSMIT) {
		n = ohci->nb_iso_xmit_ctx;
		usage = &ohci->it_ctx_usage;
	} else {
		n = ohci->nb_iso_rcv_ctx;
		usage = &ohci->ir_ctx_usage;

		/* only one receive context can be multichannel (OHCI sec 10.4.1) */
		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
			if (test_and_set_bit(0, &ohci->ir_multichannel_used))
				return r;
		}
	}

	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);

	for (i = 0; i < n; i++)
		if (!test_and_set_bit(i, usage)) {
			tasklet->context = i;
			list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
			r = 0;
			break;
		}

	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);

	return r;
}
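/*
 * Illustrative sketch (not part of the driver): the bitmap allocator used
 * by ohci1394_register_iso_tasklet() above.  A context number is claimed
 * by atomically setting the first clear bit; freeing it is a clear_bit()
 * with the same index, as in ohci1394_unregister_iso_tasklet() below.
 */
#if 0
static int example_claim_context(unsigned long *usage, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!test_and_set_bit(i, usage))
			return i;	/* context i now belongs to the caller */
	return -EBUSY;
}
#endif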
void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
				     struct ohci1394_iso_tasklet *tasklet)
{
	unsigned long flags;

	tasklet_kill(&tasklet->tasklet);

	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);

	if (tasklet->type == OHCI_ISO_TRANSMIT)
		clear_bit(tasklet->context, &ohci->it_ctx_usage);
	else {
		clear_bit(tasklet->context, &ohci->ir_ctx_usage);

		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE)
			clear_bit(0, &ohci->ir_multichannel_used);
	}

	list_del(&tasklet->link);

	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
}
EXPORT_SYMBOL(ohci1394_stop_context);
EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
/***********************************
 * General module initialization   *
 ***********************************/

MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
MODULE_LICENSE("GPL");

static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}

static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}

module_init(ohci1394_init);
module_exit(ohci1394_cleanup);