/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)
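/*
 * These values encode the 16-bit control word of an OHCI DMA descriptor:
 * the command in the high bits (OUTPUT_MORE/LAST, INPUT_MORE/LAST), a
 * status-writeback flag at bit 11, the key field at bits 10-8 (key 2 =
 * immediate data), ping and yy flags, the interrupt policy at bits 5-4,
 * the branch policy at bits 3-2 and the wait condition at bits 1-0; see
 * the descriptor format in the OHCI 1.1 specification, section 3.1.
 */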
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;
	__le32 reserved0;
	__le32 first_buffer;
	__le32 second_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)
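/*
 * Each DMA context exposes a standard register block: ContextControl has
 * set and clear views at offsets 0 and 4, CommandPtr lives at offset 12
 * and ContextMatch (isochronous receive only) at offset 16.  These macros
 * map a context's register base to the individual registers.
 */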
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};
struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};
struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);
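/*
 * A descriptor callback returns nonzero to let context_tasklet() keep
 * walking the DMA program, or zero to stop iteration because the
 * descriptor just examined has not been completed by the controller yet.
 */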
/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};
struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program.  It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};
#define IT_HEADER_SY(v)          ((v) <<  0)
#define IT_HEADER_TCODE(v)       ((v) <<  4)
#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
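/*
 * The IT_HEADER_* helpers assemble the two header quadlets prepended to
 * each transmitted isochronous packet: speed, tag, channel, tcode and sy
 * in the first quadlet, and the data length in the upper half of the
 * second quadlet.
 */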
struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;
};

#define CONFIG_ROM_SIZE 1024
struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	atomic_t bus_seconds;

	bool use_dualbuffer;
	bool old_uninorth;
	bool bus_reset_packet_quirk;
	bool iso_cycle_timer_quirk;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u64 ir_context_channels;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}
#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400
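/*
 * ContextControl bits: software sets 'run' to start a context and writes
 * 'wake' to make the controller re-fetch a branch address after new
 * descriptors have been appended; the controller reports 'dead' when it
 * encounters an error and 'active' while it is processing descriptors.
 */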
#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
static char ohci_driver_name[] = KBUILD_MODNAME;
#ifdef CONFIG_FIREWIRE_OHCI_DEBUG

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");
static void log_irqs(u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
	    evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
	    evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
	    evt & OHCI1394_isochRx		? " IR"			: "",
	    evt & OHCI1394_isochTx		? " IT"			: "",
	    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
	    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
	    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
	    evt & OHCI1394_cycleInconsistent	? " cycleInconsistent"	: "",
	    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
	    evt & OHCI1394_busReset		? " busReset"		: "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_cycleInconsistent |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
						? " ?"			: "");
}
static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
	[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };
static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}
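/*
 * Self-ID port status, two bits per port: '.' = port not present,
 * '-' = not connected, 'p' = connected to parent, 'c' = connected to
 * child (IEEE 1394 self-ID packet port field encoding).
 */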
static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
		  self_id_count, generation, node_id);

	for (; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
			    "%s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
}
static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};
static const char *phys[] = {
	[0x0] = "phy config packet",	[0x1] = "link-on packet",
	[0x2] = "self-id packet",	[0x3] = "-reserved-",
};
static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
{
	int tcode = header[0] >> 4 & 0xf;
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		fw_notify("A%c evt_bus_reset, generation %d\n",
			  dir, (header[2] >> 16) & 0xff);
		return;
	}

	if (header[0] == ~header[1]) {
		fw_notify("A%c %s, %s, %08x\n",
		    dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
		return;
	}

	switch (tcode) {
	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case 0xe: case 0xa:
		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
		break;
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s, %04x%08x%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], header[1] & 0xffff, header[2], specific);
		break;
	default:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], specific);
	}
}

#else

#define log_irqs(evt)
#define log_selfids(node_id, generation, self_id_count, sid)
#define log_ar_at_event(dir, speed, header, evt)

#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}
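/*
 * PCI writes are posted, so a write to a control register may still sit
 * in a bridge's write buffer; reading any register from the same device
 * forces all prior writes to complete before the read returns.
 */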
static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	flush_writes(ohci);
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}
static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t uninitialized_var(ab_bus);
	size_t offset;

	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						    DESCRIPTOR_STATUS |
						    DESCRIPTOR_BRANCH_ALWAYS);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}
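/*
 * Note the low bits of the branch address above: the controller
 * interprets bits 3-0 of branchAddress as Z, the number of descriptors
 * at the target, so "ab_bus | 1" points at a single-descriptor block.
 */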
static void ar_context_release(struct ar_context *ctx)
{
	struct ar_buffer *ab, *ab_next;
	size_t offset;
	dma_addr_t ab_bus;

	for (ab = ctx->current_buffer; ab; ab = ab_next) {
		ab_next = ab->next;
		offset = offsetof(struct ar_buffer, data);
		ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
		dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  ab, ab_bus);
	}
}
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
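/*
 * On old Apple "UniNorth" PowerMac host bridges the quadlets that the
 * link layer writes into DMA buffers arrive in host (big-endian) order
 * rather than little-endian order, so they must not be byte-swapped
 * again; the old_uninorth flag selects that behavior.
 */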
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		/* FIXME: Stop context, discard everything, and restart? */
		p.header_length = 0;
		p.payload_length = 0;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt    = (status >> 16) & 0x1f;

	p.ack        = evt - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event('R', p.speed, p.header, evt);

	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_tasklet).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!ohci->bus_reset_packet_quirk)
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;
		dma_addr_t start_bus;
		void *start;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer.  We
		 * reuse the page for reassembling the split packet.
		 */

		offset = offsetof(struct ar_buffer, data);
		start = buffer = ab;
		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		dma_free_coherent(ohci->card.device, PAGE_SIZE,
				  start, start_bus);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}
static int ar_context_init(struct ar_context *ctx,
			   struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs        = regs;
	ctx->ohci        = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	return 0;
}
static void ar_context_run(struct ar_context *ctx)
{
	struct ar_buffer *ab = ctx->current_buffer;
	dma_addr_t ab_bus;
	size_t offset;

	offset = offsetof(struct ar_buffer, data);
	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	int b, key;

	b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && (b == 3 || key == 2))
		return d;
	else
		return d + z - 1;
}
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
				address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}
/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
			&bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}
static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;

	return 0;
}
static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
			desc->buffer_bus -
			((void *)&desc->buffer - (void *)desc));
}
/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
		int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}
static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);
	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev = find_branch_descriptor(d, z);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}
static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		mdelay(1);
	}
	fw_error("Error: DMA context still active (0x%08x)\n", reg);
}
struct driver_data {
	struct fw_packet *packet;
};
/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;
	u32 reg;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
	 */

	header = (__le32 *) &d[1];
	switch (packet->header_length) {
	case 16:
	case 12:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;

	case 8:
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0]);
		header[2] = cpu_to_le32(packet->header[1]);
		d[0].req_count = cpu_to_le16(12);
		break;

	case 4:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		payload_bus =
			dma_map_single(ohci->card.device, packet->payload,
				       packet->payload_length, DMA_TO_DEVICE);
		if (dma_mapping_error(ohci->card.device, payload_bus)) {
			packet->ack = RCODE_SEND_ERROR;
			return -1;
		}
		packet->payload_bus = payload_bus;
		packet->payload_mapped = true;

		d[2].req_count    = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/*
	 * If the controller and packet generations don't match, we need to
	 * bail out and try again.  If IntEvent.busReset is set, the AT context
	 * is halted, so appending to the context and trying to run it is
	 * futile.  Most controllers do the right thing and just flush the AT
	 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
	 * up stalling out.  So we just bail out in software and try again
	 * later, and everyone is happy.
	 * FIXME: Document how the locking works.
	 */
	if (ohci->generation != packet->generation ||
	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	/* If the context isn't already running, start it up. */
	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
	if ((reg & CONTEXT_RUN) == 0)
		context_run(ctx, 0);

	return 0;
}
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event('T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A flushed packet should give the same error as when
		 * we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/*
		 * Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks.
		 */
		packet->ack = RCODE_NO_ACK;
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}
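/*
 * The ack cases above rely on the OHCI event encoding: for a transmitted
 * packet the controller stores the received IEEE 1394 ack code plus 0x10
 * in the event field, which is why ack codes are matched with "+ 0x10"
 * and converted back with "evt - 0x10".
 */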
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}
static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
 out:
	fw_core_handle_response(&ohci->card, &response);
}
static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}
static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}
static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_notify("node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		fw_notify("misconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (reg & OHCI1394_SelfIDCount_selfIDError) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = (reg >> 3) & 0xff;
	if (self_id_count == 0 || self_id_count > 252) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
			fw_notify("inconsistent self IDs\n");
			return;
		}
		ohci->self_id_buffer[j] =
				cond_le32_to_cpu(ohci->self_id_cpu[i]);
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self IDs\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	if (ohci->bus_reset_packet_quirk)
		ohci->request_generation = generation;

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */

	if (ohci->next_config_rom != NULL) {
		if (ohci->next_config_rom != ohci->config_rom) {
			free_rom      = ohci->config_rom;
			free_rom_bus  = ohci->config_rom_bus;
		}
		ohci->config_rom      = ohci->next_config_rom;
		ohci->config_rom_bus  = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = ohci->next_header;
		reg_write(ohci, OHCI1394_ConfigROMhdr,
			  be32_to_cpu(ohci->next_header));
	}

#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (free_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  free_rom, free_rom_bus);

	log_selfids(ohci->node_id, generation,
		    self_id_count, ohci->self_id_buffer);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}
static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event, cycle_time;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	/* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	log_irqs(event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	if (unlikely(event & OHCI1394_regAccessFail))
		fw_error("Register access failure - "
			 "please notify linux1394-devel@lists.sf.net\n");

	if (unlikely(event & OHCI1394_postedWriteErr))
		fw_error("PCI posted write error\n");

	if (unlikely(event & OHCI1394_cycleTooLong)) {
		if (printk_ratelimit())
			fw_notify("isochronous cycle too long\n");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	}

	if (unlikely(event & OHCI1394_cycleInconsistent)) {
		/*
		 * We need to clear this event bit in order to make
		 * cycleMatch isochronous I/O work.  In theory we should
		 * stop active cycleMatch iso contexts now and restart
		 * them at least two cycles later.  (FIXME?)
		 */
		if (printk_ratelimit())
			fw_notify("isochronous cycle inconsistent\n");
	}

	if (event & OHCI1394_cycle64Seconds) {
		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		if ((cycle_time & 0x80000000) == 0)
			atomic_inc(&ohci->bus_seconds);
	}

	return IRQ_HANDLED;
}
static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}
static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
{
	size_t size = length * 4;

	memcpy(dest, src, size);
	if (size < CONFIG_ROM_SIZE)
		memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}
static int ohci_enable(struct fw_card *card,
		       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct pci_dev *dev = to_pci_dev(card->device);
	int i, lps;

	if (software_reset(ohci)) {
		fw_error("Failed to reset ohci card.\n");
		return -EBUSY;
	}

	/*
	 * Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  However, with some cards (well, at least
	 * a JMicron PCIe card), we have to try again sometimes.
	 */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);

	for (lps = 0, i = 0; !lps && i < 3; i++) {
		msleep(50);
		lps = reg_read(ohci, OHCI1394_HCControlSet) &
		      OHCI1394_HCControl_LPS;
	}

	if (!lps) {
		fw_error("Failed to set Link Power Status\n");
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_LinkControlClear,
		  OHCI1394_LinkControl_rcvPhyPkt);
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

	ar_context_run(&ohci->ar_request_ctx);
	ar_context_run(&ohci->ar_response_ctx);

	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_selfIDComplete |
		  OHCI1394_RQPkt | OHCI1394_RSPkt |
		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		  OHCI1394_isochRx | OHCI1394_isochTx |
		  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent |
		  OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
		  OHCI1394_masterIntEnable);
	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);

	/* Activate link_on bit and contender bit in our self ID packets. */
	if (ohci_update_phy_reg(card, 4, 0,
				PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
		return -EIO;

	/*
	 * When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled.  This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in the bus reset tasklet.
	 */

	if (config_rom) {
		ohci->next_config_rom =
			dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
					   &ohci->next_config_rom_bus,
					   GFP_KERNEL);
		if (ohci->next_config_rom == NULL)
			return -ENOMEM;

		copy_config_rom(ohci->next_config_rom, config_rom, length);
	} else {
		/*
		 * In the suspend case, config_rom is NULL, which
		 * means that we just reuse the old config rom.
		 */
		ohci->next_config_rom = ohci->config_rom;
		ohci->next_config_rom_bus = ohci->config_rom_bus;
	}

	ohci->next_header = ohci->next_config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  be32_to_cpu(ohci->next_config_rom[2]));
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	if (request_irq(dev->irq, irq_handler,
			IRQF_SHARED, ohci_driver_name, ohci)) {
		fw_error("Failed to allocate shared interrupt %d.\n",
			 dev->irq);
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);
	flush_writes(ohci);

	/*
	 * We are ready to go, initiate bus reset to finish the
	 * initialization.
	 */

	fw_core_initiate_bus_reset(&ohci->card, 1);

	return 0;
}
static int ohci_set_config_rom(struct fw_card *card,
			       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int ret = -EBUSY;
	__be32 *next_config_rom;
	dma_addr_t uninitialized_var(next_config_rom_bus);

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		copy_config_rom(ohci->next_config_rom, config_rom, length);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
		ret = 0;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */
	if (ret == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);
	else
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);

	return ret;
}
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}
static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int ret = -ENOENT;

	tasklet_disable(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	log_ar_at_event('T', packet->speed, packet->header, 0x20);
	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;
	packet->callback(packet, &ohci->card, packet->ack);
	ret = 0;
 out:
	tasklet_enable(&ctx->tasklet);

	return ret;
}
static int ohci_enable_phys_dma(struct fw_card *card,
				int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	return 0;
#else
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, ret = 0;

	/*
	 * FIXME:  Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
	 */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		ret = -ESTALE;
		goto out;
	}

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ret;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
static u32 cycle_timer_ticks(u32 cycle_timer)
{
	u32 ticks;

	ticks = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);

	return ticks;
}
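/*
 * The isochronous cycle timer register packs three fields: cycleOffset
 * (bits 11-0, 0..3071 ticks of the 24.576 MHz clock), cycleCount
 * (bits 24-12, 0..7999 cycles per second) and cycleSeconds (bits 31-25).
 * The arithmetic above flattens all three into a single tick count.
 */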
/*
 * Some controllers exhibit one or more of the following bugs when updating the
 * iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that happens
 *    at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
 *    not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same, i.e.
 * less than twice the other.  Furthermore, any negative difference indicates an
 * error.  (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
 * execute, so we have enough precision to compute the ratio of the differences.)
 */
static u64 ohci_get_bus_time(struct fw_card *card)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 c0, c1, c2;
	u32 t0, t1, t2;
	s32 diff01, diff12;
	int i;

	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->iso_cycle_timer_quirk) {
		i = 0;
		c1 = c2;
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		do {
			c0 = c1;
			c1 = c2;
			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
			t0 = cycle_timer_ticks(c0);
			t1 = cycle_timer_ticks(c1);
			t2 = cycle_timer_ticks(c2);
			diff01 = t1 - t0;
			diff12 = t2 - t1;
		} while ((diff01 <= 0 || diff12 <= 0 ||
			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
			 && i++ < 20);
	}

	return ((u64)atomic_read(&ohci->bus_seconds) << 32) | c2;
}
static void copy_iso_headers(struct iso_context *ctx, void *p)
{
	int i = ctx->header_length;

	if (i + ctx->base.header_size > PAGE_SIZE)
		return;

	/*
	 * The iso header is byteswapped to little endian by
	 * the controller, but the remaining header quadlets
	 * are big endian.  We want to present all the headers
	 * as big endian, so we have to swap the first quadlet.
	 */
	if (ctx->base.header_size > 0)
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
	if (ctx->base.header_size > 4)
		*(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
	if (ctx->base.header_size > 8)
		memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
	ctx->header_length += ctx->base.header_size;
}
static int handle_ir_dualbuffer_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct db_descriptor *db = (struct db_descriptor *) d;
	__le32 *ir_header;
	size_t header_length;
	void *p, *end;

	if (db->first_res_count != 0 && db->second_res_count != 0) {
		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
			/* This descriptor isn't done yet, stop iteration. */
			return 0;
		}
		ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
	}

	header_length = le16_to_cpu(db->first_req_count) -
			le16_to_cpu(db->first_res_count);

	p = db + 1;
	end = p + header_length;
	while (p < end) {
		copy_iso_headers(ctx, p);
		ctx->excess_bytes +=
			(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
		p += max(ctx->base.header_size, (size_t)8);
	}

	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
			le16_to_cpu(db->second_res_count);

	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) (db + 1);
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}
static int handle_ir_packet_per_buffer(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	__le32 *ir_header;
	void *p;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	p = last + 1;
	copy_iso_headers(ctx, p);

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) p;
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}
*context
,
1957 struct descriptor
*d
,
1958 struct descriptor
*last
)
1960 struct iso_context
*ctx
=
1961 container_of(context
, struct iso_context
, context
);
1963 struct descriptor
*pd
;
1965 for (pd
= d
; pd
<= last
; pd
++)
1966 if (pd
->transfer_status
)
1969 /* Descriptor(s) not done yet, stop iteration */
1972 i
= ctx
->header_length
;
1973 if (i
+ 4 < PAGE_SIZE
) {
1974 /* Present this value as big-endian to match the receive code */
1975 *(__be32
*)(ctx
->header
+ i
) = cpu_to_be32(
1976 ((u32
)le16_to_cpu(pd
->transfer_status
) << 16) |
1977 le16_to_cpu(pd
->res_count
));
1978 ctx
->header_length
+= 4;
1980 if (le16_to_cpu(last
->control
) & DESCRIPTOR_IRQ_ALWAYS
) {
1981 ctx
->base
.callback(&ctx
->base
, le16_to_cpu(last
->res_count
),
1982 ctx
->header_length
, ctx
->header
,
1983 ctx
->base
.callback_data
);
1984 ctx
->header_length
= 0;
1989 static struct fw_iso_context
*ohci_allocate_iso_context(struct fw_card
*card
,
1990 int type
, int channel
, size_t header_size
)
1992 struct fw_ohci
*ohci
= fw_ohci(card
);
1993 struct iso_context
*ctx
, *list
;
1994 descriptor_callback_t callback
;
1995 u64
*channels
, dont_care
= ~0ULL;
1997 unsigned long flags
;
1998 int index
, ret
= -ENOMEM
;
2000 if (type
== FW_ISO_CONTEXT_TRANSMIT
) {
2001 channels
= &dont_care
;
2002 mask
= &ohci
->it_context_mask
;
2003 list
= ohci
->it_context_list
;
2004 callback
= handle_it_packet
;
2006 channels
= &ohci
->ir_context_channels
;
2007 mask
= &ohci
->ir_context_mask
;
2008 list
= ohci
->ir_context_list
;
2009 if (ohci
->use_dualbuffer
)
2010 callback
= handle_ir_dualbuffer_packet
;
2012 callback
= handle_ir_packet_per_buffer
;
2015 spin_lock_irqsave(&ohci
->lock
, flags
);
2016 index
= *channels
& 1ULL << channel
? ffs(*mask
) - 1 : -1;
2018 *channels
&= ~(1ULL << channel
);
2019 *mask
&= ~(1 << index
);
2021 spin_unlock_irqrestore(&ohci
->lock
, flags
);
2024 return ERR_PTR(-EBUSY
);
2026 if (type
== FW_ISO_CONTEXT_TRANSMIT
)
2027 regs
= OHCI1394_IsoXmitContextBase(index
);
2029 regs
= OHCI1394_IsoRcvContextBase(index
);
2032 memset(ctx
, 0, sizeof(*ctx
));
2033 ctx
->header_length
= 0;
2034 ctx
->header
= (void *) __get_free_page(GFP_KERNEL
);
2035 if (ctx
->header
== NULL
)
2038 ret
= context_init(&ctx
->context
, ohci
, regs
, callback
);
2040 goto out_with_header
;
2045 free_page((unsigned long)ctx
->header
);
2047 spin_lock_irqsave(&ohci
->lock
, flags
);
2048 *mask
|= 1 << index
;
2049 spin_unlock_irqrestore(&ohci
->lock
, flags
);
2051 return ERR_PTR(ret
);
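/*
 * Start an allocated iso context: unmask its interrupt, program the
 * channel/sync/tag match register for receive contexts, and kick off
 * the DMA program, optionally gated on a cycle match when cycle >= 0.
 */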
static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control, match;
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
	} else {
		index = ctx - ohci->ir_context_list;
		control = IR_CONTEXT_ISOCH_HEADER;
		if (ohci->use_dualbuffer)
			control |= IR_CONTEXT_DUAL_BUFFER_MODE;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);
	}

	return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}

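/*
 * Tear down an iso context and return its context index (and, for
 * receive contexts, its channel) to the allocation masks under
 * ohci->lock.
 */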
static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= 1ULL << base->channel;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}

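/*
 * Queue one transmit packet as a descriptor block: unless the packet is
 * skipped, d[0]/d[1] form an immediate descriptor carrying the 8-byte
 * iso transmit header, optionally followed by a descriptor for extra
 * user-supplied header data and one descriptor per page the payload
 * touches.  The block's last descriptor gets OUTPUT_LAST, status and
 * branch control bits, plus an interrupt if the client requested one.
 */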
static int ohci_queue_iso_transmit(struct fw_iso_context *base,
				   struct fw_iso_packet *packet,
				   struct fw_iso_buffer *buffer,
				   unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit, or terminate.
	 */

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count    = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page               = payload_index >> PAGE_SHIFT;
		offset             = payload_index & ~PAGE_MASK;
		next_page_index    = (page + 1) << PAGE_SHIFT;
		length             =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count    = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}

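/*
 * Queue a receive packet in dual-buffer mode: each db_descriptor splits
 * the incoming data so that the isochronous headers land in the first
 * buffer (placed right after the descriptor itself) while the payload
 * is written through the second buffer into the client's buffer pages.
 */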
static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
					     struct fw_iso_packet *packet,
					     struct fw_iso_buffer *buffer,
					     unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct db_descriptor *db = NULL;
	struct descriptor *d;
	struct fw_iso_packet *p;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, length, rest;
	int page, offset, packet_count, header_size;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit, or terminate.
	 */

	p = packet;
	z = 2;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = packet_count * max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page     = payload >> PAGE_SHIFT;
	offset   = payload & ~PAGE_MASK;
	rest     = p->payload_length;
	/*
	 * The controllers I've tested have not worked correctly when
	 * second_req_count is zero.  Rather than do something we know won't
	 * work, return an error.
	 */
	if (rest == 0)
		return -EINVAL;

	while (rest > 0) {
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_BRANCH_ALWAYS);
		db->first_size =
			cpu_to_le16(max(ctx->base.header_size, (size_t)8));
		if (p->skip && rest == p->payload_length) {
			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
			db->first_req_count = db->first_size;
		} else {
			db->first_req_count = cpu_to_le16(header_size);
		}
		db->first_res_count = db->first_req_count;
		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));

		if (p->skip && rest == p->payload_length)
			length = 4;
		else if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;

		db->second_req_count = cpu_to_le16(length);
		db->second_res_count = db->second_req_count;
		page_bus = page_private(buffer->pages[page]);
		db->second_buffer = cpu_to_le32(page_bus + offset);

		if (p->interrupt && length == rest)
			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		if (offset == 0)
			page++;
	}

	return 0;
}

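/*
 * Queue a receive packet in packet-per-buffer mode: for each of the
 * packet_count packets, build one INPUT_MORE descriptor that catches
 * the isochronous header (stored right after the descriptor block),
 * followed by a chain of INPUT descriptors covering the payload pages
 * and terminated by INPUT_LAST with a branch.
 */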
static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
					struct fw_iso_packet *packet,
					struct fw_iso_buffer *buffer,
					unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *pd;
	struct fw_iso_packet *p = packet;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size  = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page     = payload >> PAGE_SHIFT;
	offset   = payload & ~PAGE_MASK;
	payload_per_buffer = p->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
					      DESCRIPTOR_INPUT_MORE);
		if (p->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count    = cpu_to_le16(header_size);
		d->res_count    = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		pd = d;
		for (j = 1; j < z; j++) {
			pd++;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (p->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}

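/*
 * Dispatch to the transmit, dual-buffer, or packet-per-buffer queueing
 * routine, all under ohci->lock so descriptor allocation and context
 * appends stay serialized.
 */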
static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
		ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
	else if (ctx->context.ohci->use_dualbuffer)
		ret = ohci_queue_iso_receive_dualbuffer(base, packet,
							buffer, payload);
	else
		ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
							       buffer, payload);
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}

static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.get_bus_time		= ohci_get_bus_time,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

#ifdef CONFIG_PPC_PMAC
static void ohci_pmac_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void ohci_pmac_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
#define ohci_pmac_on(dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */

#define PCI_VENDOR_ID_AGERE		PCI_VENDOR_ID_ATT
#define PCI_DEVICE_ID_AGERE_FW643	0x5901

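/*
 * Probe one controller: map its registers, apply chip-specific quirks,
 * set up the asynchronous and isochronous context bookkeeping and the
 * self-ID DMA buffer, then register the card with the core.
 */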
static int __devinit pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int err;
	size_t size;

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	ohci_pmac_on(dev);

	err = pci_enable_device(dev);
	if (err) {
		fw_error("Failed to enable OHCI hardware\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		fw_error("MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;

	/* FIXME: make it a context option or remove dual-buffer mode */
	ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;

	/* dual-buffer mode is broken if more than one IR context is active */
	if (dev->vendor == PCI_VENDOR_ID_AGERE &&
	    dev->device == PCI_DEVICE_ID_AGERE_FW643)
		ohci->use_dualbuffer = false;

	/* dual-buffer mode is broken */
	if (dev->vendor == PCI_VENDOR_ID_RICOH &&
	    dev->device == PCI_DEVICE_ID_RICOH_R5C832)
		ohci->use_dualbuffer = false;

/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
#if !defined(CONFIG_X86_32)
	/* dual-buffer mode is broken with descriptor addresses above 2G */
	if (dev->vendor == PCI_VENDOR_ID_TI &&
	    dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
		ohci->use_dualbuffer = false;
#endif

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
	ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
			     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
	ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;

	ohci->iso_cycle_timer_quirk = dev->vendor == PCI_VENDOR_ID_AL ||
				      dev->vendor == PCI_VENDOR_ID_NEC ||
				      dev->vendor == PCI_VENDOR_ID_VIA;

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	context_init(&ohci->at_request_ctx, ohci,
		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

	context_init(&ohci->at_response_ctx, ohci,
		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err)
		goto fail_self_id;

	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
		  dev_name(&dev->dev), version >> 16, version & 0xff);

	return 0;

 fail_self_id:
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
 fail_contexts:
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	context_release(&ohci->at_response_ctx);
	context_release(&ohci->at_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	ar_context_release(&ohci->ar_request_ctx);
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(&ohci->card);
	ohci_pmac_off(dev);
 fail:
	if (err == -ENOMEM)
		fw_error("Out of memory\n");

	return err;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);

	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->next_config_rom,
				  ohci->next_config_rom_bus);
	if (ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
	ar_context_release(&ohci->ar_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	context_release(&ohci->at_request_ctx);
	context_release(&ohci->at_response_ctx);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(&ohci->card);
	ohci_pmac_off(dev);

	fw_notify("Removed fw-ohci device.\n");
}

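/*
 * Legacy PCI power management callbacks: quiesce the controller and
 * save PCI state on suspend, then re-enable and reinitialize it via
 * ohci_enable() on resume.
 */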
#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	err = pci_save_state(dev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);
	ohci_pmac_off(dev);

	return 0;
}

static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	ohci_pmac_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}

	return ohci_enable(&ohci->card, NULL, 0);
}
#endif

static const struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);