/* -*- c-basic-offset: 8 -*-
 *
 * fw-ohci.c - Driver for OHCI 1394 boards
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include "fw-transaction.h"
#include "fw-ohci.h"

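/* Control-word bit definitions for DMA descriptors: the command type,
 * key, ping, interrupt, branch and wait fields of the first quadlet,
 * following the command descriptor format in the OHCI 1394
 * specification. */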
#define descriptor_output_more		0
#define descriptor_output_last		(1 << 12)
#define descriptor_input_more		(2 << 12)
#define descriptor_input_last		(3 << 12)
#define descriptor_status		(1 << 11)
#define descriptor_key_immediate	(2 << 8)
#define descriptor_ping			(1 << 7)
#define descriptor_yy			(1 << 6)
#define descriptor_no_irq		(0 << 4)
#define descriptor_irq_error		(1 << 4)
#define descriptor_irq_always		(3 << 4)
#define descriptor_branch_always	(3 << 2)
#define descriptor_wait			(3 << 0)

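/* Generic DMA program descriptor.  The same 16-byte, 16-byte-aligned
 * layout serves output (AT/IT) and input (AR/IR) contexts; the
 * control word above selects how the controller interprets it. */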
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;

	__le32 reserved0;
	__le32 second_buffer;
	__le32 first_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));

#define control_set(regs)	(regs)
#define control_clear(regs)	((regs) + 4)
#define command_ptr(regs)	((regs) + 12)
#define context_match(regs)	((regs) + 16)

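/* AR receive buffers are single pages, each starting with the
 * descriptor the controller works on; the pages are chained into one
 * long DMA program through descriptor.branch_address. */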
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

struct context {
	struct fw_ohci *ohci;
	u32 regs;

	struct descriptor *buffer;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	struct descriptor *head_descriptor;
	struct descriptor *tail_descriptor;
	struct descriptor *tail_descriptor_last;
	struct descriptor *prev_descriptor;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

struct at_context {
	struct fw_ohci *ohci;
	dma_addr_t descriptor_bus;
	dma_addr_t buffer_bus;
	struct fw_packet *current_packet;

	struct list_head list;

	struct {
		struct descriptor more;
		__le32 header[4];
		struct descriptor last;
	} d;

	u32 regs;

	struct tasklet_struct tasklet;
};

#define it_header_sy(v)          ((v) << 0)
#define it_header_tcode(v)       ((v) << 4)
#define it_header_channel(v)     ((v) << 8)
#define it_header_tag(v)         ((v) << 14)
#define it_header_speed(v)       ((v) << 16)
#define it_header_data_length(v) ((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;

	/* Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held. */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct at_context at_request_ctx;
	struct at_context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e

static char ohci_driver_name[] = KBUILD_MODNAME;

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

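/* Read-modify-write a PHY register through the PhyControl register:
 * request a read, pick up the current value, apply clear_bits and
 * set_bits, and write the result back. */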
static int
ohci_update_phy_reg(struct fw_card *card, int addr,
		    int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}

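/* Append a freshly allocated page to an AR context's DMA program and
 * wake up the context.  The low bit set in the branch address is the
 * descriptor count (Z = 1) for the single-descriptor block. */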
static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t ab_bus;
	size_t offset;

	ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ab_bus)) {
		free_page((unsigned long) ab);
		return -ENOMEM;
	}

	memset(&ab->descriptor, 0, sizeof ab->descriptor);
	ab->descriptor.control        = cpu_to_le16(descriptor_input_more |
						    descriptor_status |
						    descriptor_branch_always);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);

	ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}

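/* Parse one packet out of an AR buffer, hand it to the core as a
 * request or response, and return a pointer to where the next packet
 * begins.  The trailing status quadlet carries ack, speed and
 * timestamp. */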
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;

	p.header[0] = le32_to_cpu(buffer[0]);
	p.header[1] = le32_to_cpu(buffer[1]);
	p.header[2] = le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = le32_to_cpu(buffer[length]);

	p.ack        = ((status >> 16) & 0x1f) - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	/* The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request. */

	if (p.ack + 16 == 0x09)
		ohci->request_generation = (buffer[2] >> 16) & 0xff;
	else if (ctx == &ohci->ar_request_ctx)
		fw_core_handle_request(&ohci->card, &p);
	else
		fw_core_handle_response(&ohci->card, &p);

	return buffer + length + 1;
}

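/* AR context tasklet: walk the packets received into the current
 * buffer.  A packet may straddle the page boundary when a descriptor
 * has been filled completely, in which case the finished page is
 * reused to reassemble the two halves before parsing. */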
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;

		/* This descriptor is finished and we may have a
		 * packet split across this and the next buffer. We
		 * reuse the page for reassembling the split packet. */

		offset = offsetof(struct ar_buffer, data);
		dma_unmap_single(ohci->card.device,
				 ab->descriptor.data_address - offset,
				 PAGE_SIZE, DMA_BIDIRECTIONAL);

		buffer = ab;
		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		free_page((unsigned long)buffer);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}

static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs        = regs;
	ctx->ohci        = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	reg_write(ctx->ohci, command_ptr(ctx->regs), ab.descriptor.branch_address);
	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	return 0;
}

static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct fw_ohci *ohci = ctx->ohci;
	struct descriptor *d, *last;
	u32 address;
	int z;

	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
				ctx->buffer_size, DMA_TO_DEVICE);

	d    = ctx->tail_descriptor;
	last = ctx->tail_descriptor_last;

	while (last->branch_address != 0) {
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d;
		last = (z == 2) ? d : d + z - 1;

		if (!ctx->callback(ctx, d, last))
			break;

		ctx->tail_descriptor = d;
		ctx->tail_descriptor_last = last;
	}
}

static int
context_init(struct context *ctx, struct fw_ohci *ohci,
	     size_t buffer_size, u32 regs,
	     descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->buffer_size = buffer_size;
	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (ctx->buffer == NULL)
		return -ENOMEM;

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	ctx->buffer_bus =
		dma_map_single(ohci->card.device, ctx->buffer,
			       buffer_size, DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->buffer_bus)) {
		kfree(ctx->buffer);
		return -ENOMEM;
	}

	ctx->head_descriptor      = ctx->buffer;
	ctx->prev_descriptor      = ctx->buffer;
	ctx->tail_descriptor      = ctx->buffer;
	ctx->tail_descriptor_last = ctx->buffer;

	/* We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.  Also, the
	 * ring buffer invariant is that it always has at least one
	 * element so that head == tail means buffer full. */

	memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
	ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
	ctx->head_descriptor++;

	return 0;
}

static void
context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;

	dma_unmap_single(card->device, ctx->buffer_bus,
			 ctx->buffer_size, DMA_TO_DEVICE);
	kfree(ctx->buffer);
}

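/* Allocate z descriptors from the ring buffer.  The block can fit in
 * one of three places: between head and tail, between head and the
 * end of the buffer, or, after wrapping around, between the start of
 * the buffer and tail. */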
static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{
	struct descriptor *d, *tail, *end;

	d = ctx->head_descriptor;
	tail = ctx->tail_descriptor;
	end = ctx->buffer + ctx->buffer_size / sizeof(struct descriptor);

	if (d + z <= tail) {
		goto has_space;
	} else if (d > tail && d + z <= end) {
		goto has_space;
	} else if (d > tail && ctx->buffer + z <= tail) {
		d = ctx->buffer;
		goto has_space;
	}

	return NULL;

 has_space:
	memset(d, 0, z * sizeof *d);
	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, command_ptr(ctx->regs),
		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
	reg_write(ohci, control_clear(ctx->regs), ~0);
	reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;

	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;

	ctx->head_descriptor = d + z + extra;
	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev_descriptor = z == 2 ? d : d + z - 1;

	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
				   ctx->buffer_size, DMA_TO_DEVICE);

	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}

static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, control_set(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			break;

		fw_notify("context_stop: still active (0x%08x)\n", reg);
		msleep(1);
	}
}

static void
do_packet_callbacks(struct fw_ohci *ohci, struct list_head *list)
{
	struct fw_packet *p, *next;

	list_for_each_entry_safe(p, next, list, link)
		p->callback(p, &ohci->card, p->ack);
}

static void
complete_transmission(struct fw_packet *packet,
		      int ack, struct list_head *list)
{
	packet->ack = ack;
	list_move_tail(&packet->link, list);
}

/* This function prepares the first packet in the context queue for
 * transmission.  Must always be called with the ohci->lock held to
 * ensure proper generation handling and locking around packet queue
 * manipulation. */
static void
at_context_setup_packet(struct at_context *ctx, struct list_head *list)
{
	struct fw_packet *packet;
	struct fw_ohci *ohci = ctx->ohci;
	int z, tcode;

	packet = fw_packet(ctx->list.next);

	memset(&ctx->d, 0, sizeof ctx->d);
	if (packet->payload_length > 0) {
		packet->payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(packet->payload_bus)) {
			complete_transmission(packet, RCODE_SEND_ERROR, list);
			return;
		}

		ctx->d.more.control      =
			cpu_to_le16(descriptor_output_more |
				    descriptor_key_immediate);
		ctx->d.more.req_count    = cpu_to_le16(packet->header_length);
		ctx->d.more.res_count    = cpu_to_le16(packet->timestamp);
		ctx->d.last.control      =
			cpu_to_le16(descriptor_output_last |
				    descriptor_irq_always |
				    descriptor_branch_always);
		ctx->d.last.req_count    = cpu_to_le16(packet->payload_length);
		ctx->d.last.data_address = cpu_to_le32(packet->payload_bus);
		z = 3;
	} else {
		ctx->d.more.control      =
			cpu_to_le16(descriptor_output_last |
				    descriptor_key_immediate |
				    descriptor_irq_always |
				    descriptor_branch_always);
		ctx->d.more.req_count    = cpu_to_le16(packet->header_length);
		ctx->d.more.res_count    = cpu_to_le16(packet->timestamp);
		z = 2;
	}

	/* The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet. */
	if (packet->header_length > 8) {
		ctx->d.header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					       (packet->speed << 16));
		ctx->d.header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					       (packet->header[0] & 0xffff0000));
		ctx->d.header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			ctx->d.header[3] = cpu_to_le32(packet->header[3]);
		else
			ctx->d.header[3] = packet->header[3];
	} else {
		ctx->d.header[0] =
			cpu_to_le32((OHCI1394_phy_tcode << 4) |
				    (packet->speed << 16));
		ctx->d.header[1] = cpu_to_le32(packet->header[0]);
		ctx->d.header[2] = cpu_to_le32(packet->header[1]);
		ctx->d.more.req_count = cpu_to_le16(12);
	}

	/* FIXME: Document how the locking works. */
	if (ohci->generation == packet->generation) {
		reg_write(ctx->ohci, command_ptr(ctx->regs),
			  ctx->descriptor_bus | z);
		reg_write(ctx->ohci, control_set(ctx->regs),
			  CONTEXT_RUN | CONTEXT_WAKE);
		ctx->current_packet = packet;
	} else {
		/* We don't return error codes from this function; all
		 * transmission errors are reported through the
		 * callback. */
		complete_transmission(packet, RCODE_GENERATION, list);
	}
}

static void at_context_stop(struct at_context *ctx)
{
	u32 reg;

	reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);

	reg = reg_read(ctx->ohci, control_set(ctx->regs));
	if (reg & CONTEXT_ACTIVE)
		fw_notify("Tried to stop context, but it is still active "
			  "(0x%08x).\n", reg);
}

static void at_context_tasklet(unsigned long data)
{
	struct at_context *ctx = (struct at_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet *packet;
	LIST_HEAD(list);
	unsigned long flags;
	int evt;

	spin_lock_irqsave(&ohci->lock, flags);

	packet = fw_packet(ctx->list.next);

	at_context_stop(ctx);

	/* If the head of the list isn't the packet that just got
	 * transmitted, the packet got cancelled before we finished
	 * transmitting it. */
	if (ctx->current_packet != packet)
		goto skip_to_next;

	if (packet->payload_length > 0) {
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);
		evt = le16_to_cpu(ctx->d.last.transfer_status) & 0x1f;
		packet->timestamp = le16_to_cpu(ctx->d.last.res_count);
	} else {
		evt = le16_to_cpu(ctx->d.more.transfer_status) & 0x1f;
		packet->timestamp = le16_to_cpu(ctx->d.more.res_count);
	}

	if (evt < 16) {
		switch (evt) {
		case OHCI1394_evt_timeout:
			/* Async response transmit timed out. */
			complete_transmission(packet, RCODE_CANCELLED, &list);
			break;

		case OHCI1394_evt_flushed:
			/* The packet was flushed; this should give the
			 * same error as when we try to use a stale
			 * generation count. */
			complete_transmission(packet,
					      RCODE_GENERATION, &list);
			break;

		case OHCI1394_evt_missing_ack:
			/* Using a valid (current) generation count,
			 * but the node is not on the bus or not
			 * sending acks. */
			complete_transmission(packet, RCODE_NO_ACK, &list);
			break;

		default:
			complete_transmission(packet, RCODE_SEND_ERROR, &list);
			break;
		}
	} else
		complete_transmission(packet, evt - 16, &list);

 skip_to_next:
	/* If more packets are queued, set up the next one. */
	if (!list_empty(&ctx->list))
		at_context_setup_packet(ctx, &list);

	spin_unlock_irqrestore(&ohci->lock, flags);

	do_packet_callbacks(ohci, &list);
}

static int
at_context_init(struct at_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	INIT_LIST_HEAD(&ctx->list);

	ctx->descriptor_bus =
		dma_map_single(ohci->card.device, &ctx->d,
			       sizeof ctx->d, DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->descriptor_bus))
		return -ENOMEM;

	ctx->regs = regs;
	ctx->ohci = ohci;

	tasklet_init(&ctx->tasklet, at_context_tasklet, (unsigned long)ctx);

	return 0;
}

#define header_get_destination(q)	(((q) >> 16) & 0xffff)
#define header_get_tcode(q)		(((q) >> 4) & 0x0f)
#define header_get_offset_high(q)	(((q) >> 0) & 0xffff)
#define header_get_data_length(q)	(((q) >> 16) & 0xffff)
#define header_get_extended_tcode(q)	(((q) >> 0) & 0xffff)

static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = header_get_tcode(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = header_get_data_length(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = header_get_tcode(packet->header[0]);
	length = header_get_data_length(packet->header[3]);
	payload = packet->payload;
	ext_tcode = header_get_extended_tcode(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof lock_old);
 out:
	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_request(struct at_context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	packet->ack = ACK_PENDING;
	packet->callback(packet, &ctx->ohci->card, packet->ack);

	offset =
		((unsigned long long)
		 header_get_offset_high(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}
}

static void
at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
{
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (header_get_destination(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	list_add_tail(&packet->link, &ctx->list);
	if (ctx->list.next == &packet->link)
		at_context_setup_packet(ctx, &list);

	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	do_packet_callbacks(ctx->ohci, &list);
}

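/* Bus reset tasklet: pick up the new node ID and the self IDs that
 * the controller stored in the self ID buffer, check them for
 * consistency, and hand the new topology to the core. */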
static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_error("node ID not valid, new bus reset in progress\n");
		return;
	}
	ohci->node_id = reg & 0xffff;

	/* The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs. */

	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
	generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
			fw_error("inconsistent self IDs\n");
		ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
	}

	/* Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer. If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs. */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	at_context_stop(&ohci->at_request_ctx);
	at_context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	/* This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it. Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place. */

	if (ohci->next_config_rom != NULL) {
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		ohci->config_rom      = ohci->next_config_rom;
		ohci->config_rom_bus  = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/* Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last. */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}

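/* Interrupt top half: acknowledge the pending events and schedule the
 * matching tasklets; all real work is done in tasklet context. */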
static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event)
		return IRQ_NONE;

	reg_write(ohci, OHCI1394_IntEventClear, event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	return IRQ_HANDLED;
}

static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct pci_dev *dev = to_pci_dev(card->device);

	/* When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled.  This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in the bus reset tasklet.
	 */

	ohci->next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &ohci->next_config_rom_bus, GFP_KERNEL);
	if (ohci->next_config_rom == NULL)
		return -ENOMEM;

	memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
	fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);

	ohci->next_header = config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	if (request_irq(dev->irq, irq_handler,
			SA_SHIRQ, ohci_driver_name, ohci)) {
		fw_error("Failed to allocate shared interrupt %d.\n",
			 dev->irq);
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);
	flush_writes(ohci);

	/* We are ready to go, initiate bus reset to finish the
	 * initialization. */

	fw_core_initiate_bus_reset(&ohci->card, 1);

	return 0;
}

static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int retval = 0;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;

	ohci = fw_ohci(card);

	/* When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register. All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
				  length * 4);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
	} else {
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);
		retval = -EBUSY;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/* Now initiate a bus reset to have the changes take
	 * effect. We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect. */
	if (retval == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);

	return retval;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&ohci->lock, flags);

	if (packet->ack == 0) {
		fw_notify("cancelling packet %p (header[0]=%08x)\n",
			  packet, packet->header[0]);

		complete_transmission(packet, RCODE_CANCELLED, &list);
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	do_packet_callbacks(ohci, &list);

	/* Return success if we actually cancelled something. */
	return list_empty(&list) ? -ENOENT : 0;
}

static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, retval = 0;

	/* FIXME:  Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset. */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		retval = -ESTALE;
		goto out;
	}

	/* NOTE, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses. */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);

	return retval;
}

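/* Completion callbacks for the three kinds of isochronous DMA
 * programs: IR buffer-fill, IR dual-buffer and IT.  Each returns 0 to
 * stop the context tasklet's iteration at a descriptor the controller
 * has not finished yet. */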
static int handle_ir_bufferfill_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (d->res_count > 0)
		return 0;

	if (le16_to_cpu(last->control) & descriptor_irq_always)
		ctx->base.callback(&ctx->base,
				   le16_to_cpu(last->res_count),
				   0, NULL, ctx->base.callback_data);

	return 1;
}

static int handle_ir_dualbuffer_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct db_descriptor *db = (struct db_descriptor *) d;
	size_t header_length;

	if (db->first_res_count > 0 && db->second_res_count > 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	header_length = db->first_req_count - db->first_res_count;
	if (ctx->header_length + header_length <= PAGE_SIZE)
		memcpy(ctx->header + ctx->header_length, db + 1, header_length);
	ctx->header_length += header_length;

	if (le16_to_cpu(db->control) & descriptor_irq_always) {
		ctx->base.callback(&ctx->base, 0,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}

#define ISO_BUFFER_SIZE (64 * 1024)

static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	if (le16_to_cpu(last->control) & descriptor_irq_always)
		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
				   0, NULL, ctx->base.callback_data);

	return 1;
}

static struct fw_iso_context *
ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx, *list;
	descriptor_callback_t callback;
	u32 *mask, regs;
	unsigned long flags;
	int index, retval = -ENOMEM;

	if (type == FW_ISO_CONTEXT_TRANSMIT) {
		mask = &ohci->it_context_mask;
		list = ohci->it_context_list;
		callback = handle_it_packet;
	} else {
		mask = &ohci->ir_context_mask;
		list = ohci->ir_context_list;
		if (header_size > 0)
			callback = handle_ir_dualbuffer_packet;
		else
			callback = handle_ir_bufferfill_packet;
	}

	spin_lock_irqsave(&ohci->lock, flags);
	index = ffs(*mask) - 1;
	if (index >= 0)
		*mask &= ~(1 << index);
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(-EBUSY);

	if (type == FW_ISO_CONTEXT_TRANSMIT)
		regs = OHCI1394_IsoXmitContextBase(index);
	else
		regs = OHCI1394_IsoRcvContextBase(index);

	ctx = &list[index];
	memset(ctx, 0, sizeof *ctx);
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL)
		goto out;

	retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
			      regs, callback);
	if (retval < 0)
		goto out_with_header;

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irqsave(&ohci->lock, flags);
	*mask |= 1 << index;
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(retval);
}

static int ohci_start_iso(struct fw_iso_context *base, s32 cycle)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 cycle_match = 0, mode;
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		if (cycle > 0)
			cycle_match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, cycle_match);
	} else {
		index = ctx - ohci->ir_context_list;

		if (ctx->base.header_size > 0)
			mode = IR_CONTEXT_DUAL_BUFFER_MODE;
		else
			mode = IR_CONTEXT_BUFFER_FILL;
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, context_match(ctx->context.regs),
			  0xf0000000 | ctx->base.channel);
		context_run(&ctx->context, mode);
	}

	return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}

*base
)
1452 struct fw_ohci
*ohci
= fw_ohci(base
->card
);
1453 struct iso_context
*ctx
= container_of(base
, struct iso_context
, base
);
1454 unsigned long flags
;
1457 ohci_stop_iso(base
);
1458 context_release(&ctx
->context
);
1459 free_page((unsigned long)ctx
->header
);
1461 spin_lock_irqsave(&ohci
->lock
, flags
);
1463 if (ctx
->base
.type
== FW_ISO_CONTEXT_TRANSMIT
) {
1464 index
= ctx
- ohci
->it_context_list
;
1465 ohci
->it_context_mask
|= 1 << index
;
1467 index
= ctx
- ohci
->ir_context_list
;
1468 ohci
->ir_context_mask
|= 1 << index
;
1471 spin_unlock_irqrestore(&ohci
->lock
, flags
);
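/* Queue an IT packet: an immediate descriptor carries the iso packet
 * header, an optional block holds the user-supplied header, and one
 * descriptor covers each page the payload touches. */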
static int
ohci_queue_iso_transmit(struct fw_iso_context *base,
			struct fw_iso_packet *packet,
			struct fw_iso_buffer *buffer,
			unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	/* FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate.. */

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof *d);

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control   = cpu_to_le16(descriptor_key_immediate);
		d[0].req_count = cpu_to_le16(8);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(it_header_sy(p->sy) |
					it_header_tag(p->tag) |
					it_header_tcode(TCODE_STREAM_DATA) |
					it_header_channel(ctx->base.channel) |
					it_header_speed(ctx->base.speed));
		header[1] =
			cpu_to_le32(it_header_data_length(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count    = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof *d);
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page               = payload_index >> PAGE_SHIFT;
		offset             = payload_index & ~PAGE_MASK;
		next_page_index    = (page + 1) << PAGE_SHIFT;
		length             =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count    = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = descriptor_irq_always;
	else
		irq = descriptor_no_irq;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(descriptor_output_last |
				     descriptor_status |
				     descriptor_branch_always |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}

static int
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
				  struct fw_iso_packet *packet,
				  struct fw_iso_buffer *buffer,
				  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct db_descriptor *db = NULL;
	struct descriptor *d;
	struct fw_iso_packet *p;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, length, rest;
	int page, offset;

	/* FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate.. */

	p = packet;
	z = 2;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof *d);
	page     = payload >> PAGE_SHIFT;
	offset   = payload & ~PAGE_MASK;
	rest     = p->payload_length;

	/* FIXME: OHCI 1.0 doesn't support dual buffer receive */
	/* FIXME: handle descriptor_wait */
	/* FIXME: make packet-per-buffer/dual-buffer a context option */
	while (rest > 0) {
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(descriptor_status |
					  descriptor_branch_always);
		db->first_size = cpu_to_le16(ctx->base.header_size);
		db->first_req_count = cpu_to_le16(p->header_length);
		db->first_res_count = db->first_req_count;
		db->first_buffer = cpu_to_le32(d_bus + sizeof *db);

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;

		db->second_req_count = cpu_to_le16(length);
		db->second_res_count = db->second_req_count;
		page_bus = page_private(buffer->pages[page]);
		db->second_buffer = cpu_to_le32(page_bus + offset);

		if (p->interrupt && length == rest)
			db->control |= cpu_to_le16(descriptor_irq_always);

		context_append(&ctx->context, d, z, header_z);
		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		page++;
	}

	return 0;
}

static int
ohci_queue_iso_receive_bufferfill(struct fw_iso_context *base,
				  struct fw_iso_packet *packet,
				  struct fw_iso_buffer *buffer,
				  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d = NULL;
	dma_addr_t d_bus, page_bus;
	u32 length, rest;
	int page, offset;

	page   = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest   = packet->payload_length;

	while (rest > 0) {
		d = context_get_descriptors(&ctx->context, 1, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(descriptor_input_more |
					 descriptor_status |
					 descriptor_branch_always);

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;

		page_bus = page_private(buffer->pages[page]);
		d->data_address = cpu_to_le32(page_bus + offset);
		d->req_count = cpu_to_le16(length);
		d->res_count = cpu_to_le16(length);

		if (packet->interrupt && length == rest)
			d->control |= cpu_to_le16(descriptor_irq_always);

		context_append(&ctx->context, d, 1, 0);

		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		page++;
	}

	return 0;
}

static int
ohci_queue_iso(struct fw_iso_context *base,
	       struct fw_iso_packet *packet,
	       struct fw_iso_buffer *buffer,
	       unsigned long payload)
{
	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
		return ohci_queue_iso_transmit(base, packet, buffer, payload);
	else if (base->header_size == 0)
		return ohci_queue_iso_receive_bufferfill(base, packet,
							 buffer, payload);
	else
		return ohci_queue_iso_receive_dualbuffer(base, packet,
							 buffer, payload);
}

static const struct fw_card_driver ohci_driver = {
	.name			= ohci_driver_name,
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}

1743 static int cleanup(struct fw_ohci
*ohci
, int stage
, int code
)
1745 struct pci_dev
*dev
= to_pci_dev(ohci
->card
.device
);
1748 case CLEANUP_SELF_ID
:
1749 dma_free_coherent(ohci
->card
.device
, SELF_ID_BUF_SIZE
,
1750 ohci
->self_id_cpu
, ohci
->self_id_bus
);
1751 case CLEANUP_REGISTERS
:
1752 kfree(ohci
->it_context_list
);
1753 kfree(ohci
->ir_context_list
);
1754 pci_iounmap(dev
, ohci
->registers
);
1756 pci_release_region(dev
, 0);
1757 case CLEANUP_DISABLE
:
1758 pci_disable_device(dev
);
1759 case CLEANUP_PUT_CARD
:
1760 fw_card_put(&ohci
->card
);
static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int error_code;
	size_t size;

	ohci = kzalloc(sizeof *ohci, GFP_KERNEL);
	if (ohci == NULL) {
		fw_error("Could not malloc fw_ohci data.\n");
		return -ENOMEM;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	if (pci_enable_device(dev)) {
		fw_error("Failed to enable OHCI hardware.\n");
		return cleanup(ohci, CLEANUP_PUT_CARD, -ENODEV);
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	if (pci_request_region(dev, 0, ohci_driver_name)) {
		fw_error("MMIO resource unavailable\n");
		return cleanup(ohci, CLEANUP_DISABLE, -EBUSY);
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		return cleanup(ohci, CLEANUP_IOMEM, -ENXIO);
	}

	if (software_reset(ohci)) {
		fw_error("Failed to reset ohci card.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -EBUSY);
	}

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled. */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);
	msleep(50);

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	at_context_init(&ohci->at_request_ctx, ohci,
			OHCI1394_AsReqTrContextControlSet);

	at_context_init(&ohci->at_response_ctx, ohci,
			OHCI1394_AsRspTrContextControlSet);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		fw_error("Out of memory for it/ir contexts.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM);
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		fw_error("Out of memory for self ID buffer.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM);
	}

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_selfIDComplete |
		  OHCI1394_RQPkt | OHCI1394_RSPkt |
		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		  OHCI1394_isochRx | OHCI1394_isochTx |
		  OHCI1394_masterIntEnable);

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	error_code = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (error_code < 0)
		return cleanup(ohci, CLEANUP_SELF_ID, error_code);

	version = reg_read(ohci, OHCI1394_Version);
	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
		  dev->dev.bus_id, (version >> 16) & 0xff, version & 0xff);

	return 0;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_masterIntEnable);
	fw_core_remove_card(&ohci->card);

	/* FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more. */

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	cleanup(ohci, CLEANUP_SELF_ID, 0);

	fw_notify("Removed fw-ohci device.\n");
}

static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);