// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"

#include <trace/events/firewire.h>
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk);

#define CREATE_TRACE_POINTS
#include <trace/events/firewire_ohci.h>
#define ohci_info(ohci, f, args...)	dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...)	dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...)	dev_err(ohci->card.device, f, ##args)
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

#define DESCRIPTOR_CMD			(0xf << 12)
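
/*
 * Worked example (added for illustration, not from the original source):
 * the AR receive descriptors set up later in ar_context_init() combine
 * DESCRIPTOR_INPUT_MORE | DESCRIPTOR_STATUS | DESCRIPTOR_BRANCH_ALWAYS,
 * i.e. (2 << 12) | (1 << 11) | (3 << 2) = 0x280c: fill the buffer, write
 * back status/res_count, and always follow the branch address to the
 * next descriptor.
 */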
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)
#define AR_BUFFER_SIZE	(32*1024)
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
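
/*
 * Worked example (added for illustration, assuming 4 KiB pages):
 * AR_BUFFERS_MIN = DIV_ROUND_UP(32768, 4096) = 8, so the AR ring uses
 * eight single-page buffers.  MAX_AR_PACKET_SIZE = 16 + 4096 + 4 = 4116
 * bytes, which can span DIV_ROUND_UP(4116, 4096) = 2 pages; the first
 * AR_WRAPAROUND_PAGES pages are therefore mapped a second time behind
 * the ring (see ar_context_init()) so that a packet straddling the end
 * of the ring can be parsed with plain pointer arithmetic.
 */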
struct ar_context {
	struct fw_ohci *ohci;
	struct page *pages[AR_BUFFERS];
	void *buffer;
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	void *pointer;
	unsigned int last_buffer_index;
	u32 regs;
	struct tasklet_struct tasklet;
};
typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);
/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[];
};
struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	u32 current_bus;
	bool running;
	bool flushing;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor block in the DMA program. It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;
	int prev_z;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};
#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
	unsigned long flushing_completions;
	u32 mc_buffer_bus;
	u16 mc_completed;
	u16 last_timestamp;
};
#define CONFIG_ROM_SIZE		1024
struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool bus_time_running;
	bool is_root;
	bool csr_state_setclear_abdicate;
	int n_ir;
	int n_it;
	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask;     /* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels; /* unoccupied channels */
	u32 ir_context_support;
	u32 ir_context_mask;     /* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels; /* channels in use by the multichannel IR context */
	bool mc_allocated;

	__be32    *config_rom;
	dma_addr_t config_rom_bus;
	__be32    *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32     next_header;

	__le32    *self_id;
	dma_addr_t self_id_bus;
	struct work_struct bus_reset_work;

	u32 self_id_buffer[512];
};
static struct workqueue_struct *selfid_workqueue;
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}
#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd
#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394	0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
#define PCI_DEVICE_ID_VIA_VT630X	0x3044
#define PCI_REV_ID_VIA_VT6306		0x46
#define PCI_DEVICE_ID_VIA_VT6315	0x3403
#define QUIRK_CYCLE_TIMER		0x1
#define QUIRK_RESET_PACKET		0x2
#define QUIRK_BE_HEADERS		0x4
#define QUIRK_NO_1394A			0x8
#define QUIRK_NO_MSI			0x10
#define QUIRK_TI_SLLZ059		0x20
#define QUIRK_IR_WAKE			0x40
// On PCI Express Root Complexes in any type of AMD Ryzen machine, VIA VT6306/6307/6308 behind an
// Asmedia ASM1083/1085 PCIe-to-PCI bridge has an inconvenient property: read accesses to the
// 'Isochronous Cycle Timer' register (at offset 0xf0 in PCI I/O space) often cause an unexpected
// system reboot. The mechanism is not clear, since reads of the other registers (e.g. 'Node ID')
// are safe enough; it is probably due to the detection of some type of PCIe error.
#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ	0x80000000
#if IS_ENABLED(CONFIG_X86)

static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
{
	return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
}

#define PCI_DEVICE_ID_ASMEDIA_ASM108X	0x1080

static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
{
	const struct pci_dev *pcie_to_pci_bridge;

	// Detect any type of AMD Ryzen machine.
	if (!static_cpu_has(X86_FEATURE_ZEN))
		return false;

	// Detect VIA VT6306/6307/6308.
	if (pdev->vendor != PCI_VENDOR_ID_VIA)
		return false;
	if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
		return false;

	// Detect Asmedia ASM1083/1085.
	pcie_to_pci_bridge = pdev->bus->self;
	if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
		return false;
	if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
		return false;

	return true;
}

#else
#define has_reboot_by_cycle_timer_read_quirk(ohci) false
#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
#endif
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
		QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};
/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianness = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
	")");
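
/*
 * Usage example (added for illustration): the quirk flags combine as a
 * bitmask, so e.g. "modprobe firewire-ohci quirks=0x11" would force
 * QUIRK_CYCLE_TIMER (0x1) and QUIRK_NO_MSI (0x10) on all controllers,
 * overriding whatever ohci_quirks[] detected.
 */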
#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", or a combination, or all = -1)");

static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
		return;

	ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
	    evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
	    evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
	    evt & OHCI1394_isochRx		? " IR"			: "",
	    evt & OHCI1394_isochTx		? " IT"			: "",
	    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
	    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
	    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
	    evt & OHCI1394_cycleInconsistent	? " cycleInconsistent"	: "",
	    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
	    evt & OHCI1394_unrecoverableError	? " unrecoverableError"	: "",
	    evt & OHCI1394_busReset		? " busReset"		: "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_cycleInconsistent |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
						? " ?"			: "");
}
static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
{
	static const char *const speed[] = {
		[0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
	};
	static const char *const power[] = {
		[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
		[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
	};
	static const char port[] = {
		[PHY_PACKET_SELF_ID_PORT_STATUS_NONE] = '.',
		[PHY_PACKET_SELF_ID_PORT_STATUS_NCONN] = '-',
		[PHY_PACKET_SELF_ID_PORT_STATUS_PARENT] = 'p',
		[PHY_PACKET_SELF_ID_PORT_STATUS_CHILD] = 'c',
	};
	struct self_id_sequence_enumerator enumerator = {
		.cursor = ohci->self_id_buffer,
		.quadlet_count = self_id_count,
	};

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
		    self_id_count, generation, ohci->node_id);

	while (enumerator.quadlet_count > 0) {
		unsigned int quadlet_count;
		unsigned int port_index;
		const u32 *s;
		int i;

		s = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
		if (IS_ERR(s))
			break;

		ohci_notice(ohci,
		    "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
		    *s,
		    phy_packet_self_id_get_phy_id(*s),
		    port[self_id_sequence_get_port_status(s, quadlet_count, 0)],
		    port[self_id_sequence_get_port_status(s, quadlet_count, 1)],
		    port[self_id_sequence_get_port_status(s, quadlet_count, 2)],
		    speed[*s >> 14 & 3], *s >> 16 & 63,
		    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
		    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");

		port_index = 3;
		for (i = 1; i < quadlet_count; ++i) {
			ohci_notice(ohci,
			    "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    s[i],
			    phy_packet_self_id_get_phy_id(s[i]),
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 1)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 2)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 3)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 4)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 5)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 6)],
			    port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 7)]
			);

			port_index += 8;
		}
	}
}
static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};
static void log_ar_at_event(struct fw_ohci *ohci,
			    char dir, int speed, u32 *header, int evt)
{
	int tcode = async_header_get_tcode(header);
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
			    dir, (header[2] >> 16) & 0xff);
		return;
	}

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
	case TCODE_CYCLE_START:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		snprintf(specific, sizeof(specific), " %x,%x",
			 async_header_get_data_length(header),
			 async_header_get_extended_tcode(header));
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case TCODE_STREAM_DATA:
		ohci_notice(ohci, "A%c %s, %s\n",
			    dir, evts[evt], tcodes[tcode]);
		break;
	case 0xe:
		ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
			    dir, evts[evt], header[1], header[2]);
		break;
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		ohci_notice(ohci,
		    "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n",
		    dir, speed, async_header_get_tlabel(header),
		    async_header_get_source(header), async_header_get_destination(header),
		    evts[evt], tcodes[tcode], async_header_get_offset(header), specific);
		break;
	default:
		ohci_notice(ohci,
		    "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
		    dir, speed, async_header_get_tlabel(header),
		    async_header_get_source(header), async_header_get_destination(header),
		    evts[evt], tcodes[tcode], specific);
	}
}
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}
/*
 * Beware!  read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly.  Exceptions are intrinsically serialized contexts like pci_probe.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting.  Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to read phy reg %d\n", addr);
	dump_stack();

	return -EBUSY;
}
static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
	dump_stack();

	return -EBUSY;
}
static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}
static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}
static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = read_phy_reg(ohci, addr);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}
static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}
static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}
static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address  &= cpu_to_le32(~0xf);
	d->res_count       =  cpu_to_le16(PAGE_SIZE);
	d->transfer_status =  0;

	wmb(); /* finish init of new descriptors before branch_address update */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address  |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}
static void ar_context_release(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	unsigned int i;

	if (ctx->buffer)
		vunmap(ctx->buffer);

	for (i = 0; i < AR_BUFFERS; i++) {
		if (ctx->pages[i])
			dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
				       ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
	}
}
static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	struct fw_ohci *ohci = ctx->ohci;

	if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ohci);

		ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}
static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}
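
/*
 * Worked example (added for illustration): with AR_BUFFERS = 8 and
 * last_buffer_index = 5, ar_first_buffer_index() returns
 * ar_next_buffer_index(5) = 6, so buffer 6 is the oldest buffer to
 * inspect; indices then wrap as 6, 7, 0, 1, ... back around to 5.
 */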
/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	res_count = READ_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might be never updated by the
			 * controller and look still empty, and we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}

			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}
static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					PAGE_SIZE, DMA_FROM_DEVICE);
		i = ar_next_buffer_index(i);
	}
	if (end_buffer_offset > 0)
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					end_buffer_offset, DMA_FROM_DEVICE);
}
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk)
{
	return has_be_header_quirk ? (__force __u32)value : le32_to_cpu(value);
}

static bool has_be_header_quirk(const struct fw_ohci *ohci)
{
	return !!(ohci->quirks & QUIRK_BE_HEADERS);
}
#else
static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk __maybe_unused)
{
	return le32_to_cpu(value);
}

static bool has_be_header_quirk(const struct fw_ohci *ohci)
{
	return false;
}
#endif
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0], has_be_header_quirk(ohci));
	p.header[1] = cond_le32_to_cpu(buffer[1], has_be_header_quirk(ohci));
	p.header[2] = cond_le32_to_cpu(buffer[2], has_be_header_quirk(ohci));

	tcode = async_header_get_tcode(p.header);
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
		p.header_length = 16;
		p.payload_length = async_header_get_data_length(p.header);
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length], has_be_header_quirk(ohci));
	evt    = (status >> 16) & 0x1f;

	p.ack        = evt - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event(ohci, 'R', p.speed, p.header, evt);

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status && tcode == OHCI1394_phy_tcode)
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_work).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}
static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
	void *next;

	while (p < end) {
		next = handle_ar_packet(ctx, p);
		if (!next)
			return p;
		p = next;
	}

	return p;
}
static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
		dma_sync_single_for_device(ctx->ohci->card.device,
					   ar_buffer_bus(ctx, i),
					   PAGE_SIZE, DMA_FROM_DEVICE);
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
	}
}
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx,
							&end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		/*
		 * The filled part of the overall buffer wraps around; handle
		 * all packets up to the buffer end here.  If the last packet
		 * wraps around, its tail will be visible after the buffer end
		 * because the buffer start pages are mapped there again.
		 */
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (p < buffer_end)
			goto error;
		/* adjust p to point back into the actual buffer */
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (p != end) {
		if (p > end)
			ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);

	return;

error:
	ctx->pointer = NULL;
}
static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
			   unsigned int descriptors_offset, u32 regs)
{
	struct device *dev = ohci->card.device;
	unsigned int i;
	dma_addr_t dma_addr;
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
	struct descriptor *d;

	ctx->regs = regs;
	ctx->ohci = ohci;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	for (i = 0; i < AR_BUFFERS; i++) {
		ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
						DMA_FROM_DEVICE, GFP_KERNEL);
		if (!ctx->pages[i])
			goto out_of_memory;
		set_page_private(ctx->pages[i], dma_addr);
		dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
					   DMA_FROM_DEVICE);
	}

	for (i = 0; i < AR_BUFFERS; i++)
		pages[i]              = ctx->pages[i];
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = ctx->pages[i];
	ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (!ctx->buffer)
		goto out_of_memory;

	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

	for (i = 0; i < AR_BUFFERS; i++) {
		d = &ctx->descriptors[i];
		d->req_count      = cpu_to_le16(PAGE_SIZE);
		d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						DESCRIPTOR_STATUS |
						DESCRIPTOR_BRANCH_ALWAYS);
		d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
			ar_next_buffer_index(i) * sizeof(struct descriptor));
	}

	return 0;

out_of_memory:
	ar_context_release(ctx);

	return -ENOMEM;
}
*ctx
)
1129 for (i
= 0; i
< AR_BUFFERS
; i
++)
1130 ar_context_link_page(ctx
, i
);
1132 ctx
->pointer
= ctx
->buffer
;
1134 reg_write(ctx
->ohci
, COMMAND_PTR(ctx
->regs
), ctx
->descriptors_bus
| 1);
1135 reg_write(ctx
->ohci
, CONTROL_SET(ctx
->regs
), CONTEXT_RUN
);
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	__le16 branch;

	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return d;
	else
		return d + z - 1;
}
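
/*
 * Worked example (added for illustration): for the z = 3 block built by
 * at_context_queue_packet() (immediate header plus one payload
 * descriptor), the branch address lives in d + z - 1 = d[2], the
 * OUTPUT_LAST descriptor.  Only a z = 2 block whose first descriptor
 * already has DESCRIPTOR_BRANCH_ALWAYS set branches from d itself.
 */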
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;
		ctx->current_bus = address;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
				address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}
/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t bus_addr;
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	/*
	 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
	 * for descriptors, even 0x10-byte ones. This can cause page faults when
	 * an IOMMU is in use and the oversized read crosses a page boundary.
	 * Work around this by always leaving at least 0x10 bytes of padding.
	 */
	desc->buffer_size = PAGE_SIZE - offset - 0x10;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}
static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;
	ctx->prev_z = 1;

	return 0;
}
static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) {
		dmam_free_coherent(card->device, PAGE_SIZE, desc,
			desc->buffer_bus - ((void *)&desc->buffer - (void *)desc));
	}
}
/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	ctx->running = true;
	flush_writes(ohci);
}
static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;
	struct descriptor *d_branch;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */

	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
	d_branch->branch_address = cpu_to_le32(d_bus | z);

	/*
	 * VT6306 incorrectly checks only the single descriptor at the
	 * CommandPtr when the wake bit is written, so if it's a
	 * multi-descriptor block starting with an INPUT_MORE, put a copy of
	 * the branch address in the first descriptor.
	 *
	 * Not doing this for transmit contexts since not sure how it interacts
	 * with skip addresses.
	 */
	if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
	    d_branch != ctx->prev &&
	    (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
	     cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
		ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	}

	ctx->prev = d;
	ctx->prev_z = z;
}
static void context_stop(struct context *ctx)
{
	struct fw_ohci *ohci = ctx->ohci;
	u32 reg;
	int i;

	reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	ctx->running = false;

	for (i = 0; i < 1000; i++) {
		reg = reg_read(ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		if (i)
			udelay(10);
	}
	ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
}
struct driver_data {
	u8 inline_data[8];
	struct fw_packet *packet;
};
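
/*
 * Note (added for illustration): driver_data aliases the otherwise
 * unused fourth descriptor of an AT block.  A payload of up to 8 bytes
 * (e.g. the 4-byte payload of a quadlet read response) is copied into
 * inline_data and transmitted straight out of the descriptor memory,
 * skipping the dma_map_single() that larger payloads need in
 * at_context_queue_packet().
 */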
/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, payload_bus;
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.
	 */

	tcode = async_header_get_tcode(packet->header);
	header = (__le32 *) &d[1];
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		if (tcode_is_block_packet(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;

	case TCODE_LINK_INTERNAL:
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[1]);
		header[2] = cpu_to_le32(packet->header[2]);
		d[0].req_count = cpu_to_le16(12);

		if (is_ping_packet(&packet->header[1]))
			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
		break;

	case TCODE_STREAM_DATA:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		/* BUG(); */
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		if (packet->payload_length > sizeof(driver_data->inline_data)) {
			payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(ohci->card.device, payload_bus)) {
				packet->ack = RCODE_SEND_ERROR;
				return -1;
			}
			packet->payload_bus	= payload_bus;
			packet->payload_mapped	= true;
		} else {
			memcpy(driver_data->inline_data, packet->payload,
			       packet->payload_length);
			payload_bus = d_bus + 3 * sizeof(*d);
		}

		d[2].req_count    = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	if (ctx->running)
		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	else
		context_run(ctx, 0);

	return 0;
}
static void at_context_flush(struct context *ctx)
{
	tasklet_disable(&ctx->tasklet);

	ctx->flushing = true;
	context_tasklet((unsigned long)ctx);
	ctx->flushing = false;

	tasklet_enable(&ctx->tasklet);
}
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0 && !context->flushing)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A flushed packet should give the same error as
		 * when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		if (context->flushing)
			packet->ack = RCODE_GENERATION;
		else {
			/*
			 * Using a valid (current) generation count, but the
			 * node is not on the bus or not sending acks.
			 */
			packet->ack = RCODE_NO_ACK;
		}
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	case OHCI1394_evt_no_status:
		if (context->flushing) {
			packet->ack = RCODE_GENERATION;
			break;
		}
		fallthrough;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}
static u32 get_cycle_time(struct fw_ohci *ohci);
static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = async_header_get_tcode(packet->header);
	if (tcode_is_block_packet(tcode))
		length = async_header_get_data_length(packet->header);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!tcode_is_read_request(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	// Timestamping on behalf of the hardware.
	response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));

	fw_core_handle_response(&ohci->card, &response);
}
static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = async_header_get_tcode(packet->header);
	length = async_header_get_data_length(packet->header);
	payload = packet->payload;
	ext_tcode = async_header_get_extended_tcode(packet->header);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	ohci_err(ohci, "swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	// Timestamping on behalf of the hardware.
	response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));

	fw_core_handle_response(&ohci->card, &response);
}
static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset, csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset = async_header_get_offset(packet->header);
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}
static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (async_header_get_destination(packet->header) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);

		// Timestamping on behalf of the hardware.
		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));

		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0) {
		// Timestamping on behalf of the hardware.
		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));

		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}
static void detect_dead_context(struct fw_ohci *ohci,
				const char *name, unsigned int regs)
{
	u32 ctl;

	ctl = reg_read(ohci, CONTROL_SET(regs));
	if (ctl & CONTEXT_DEAD)
		ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
			name, evts[ctl & 0x1f]);
}
static void handle_dead_contexts(struct fw_ohci *ohci)
{
	unsigned int i;
	char name[8];

	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
	for (i = 0; i < 32; ++i) {
		if (!(ohci->it_context_support & (1 << i)))
			continue;
		sprintf(name, "IT%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
	}
	for (i = 0; i < 32; ++i) {
		if (!(ohci->ir_context_support & (1 << i)))
			continue;
		sprintf(name, "IR%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
	}
	/* TODO: maybe try to flush and restart the dead contexts */
}
static u32 cycle_timer_ticks(u32 cycle_timer)
{
	u32 ticks;

	ticks = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);

	return ticks;
}
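
/*
 * Worked example (added for illustration): the register packs
 * cycleSeconds into bits 31-25, cycleCount (0..7999) into bits 24-12,
 * and cycleOffset (0..3071) into bits 11-0.  For cycle_timer =
 * 0x04001800: offset = 0x800 = 2048 ticks, count = 1 cycle = 3072
 * ticks, seconds = 2 * 3072 * 8000 = 49152000 ticks, so the function
 * returns 2048 + 3072 + 49152000 = 49157120 ticks of the 24.576 MHz
 * clock.
 */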
/*
 * Some controllers exhibit one or more of the following bugs when updating the
 * iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that happens
 *    at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
 *    not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same, i.e.
 * less than twice the other.  Furthermore, any negative difference indicates an
 * error.  (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
 * execute, so we have enough precision to compute the ratio of the differences.)
 */
static u32 get_cycle_time(struct fw_ohci *ohci)
{
	u32 c0, c1, c2;
	u32 t0, t1, t2;
	s32 diff01, diff12;
	int i;

	if (has_reboot_by_cycle_timer_read_quirk(ohci))
		return 0;

	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
		i = 0;
		c1 = c2;
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		do {
			c0 = c1;
			c1 = c2;
			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
			t0 = cycle_timer_ticks(c0);
			t1 = cycle_timer_ticks(c1);
			t2 = cycle_timer_ticks(c2);
			diff01 = t1 - t0;
			diff12 = t2 - t1;
		} while ((diff01 <= 0 || diff12 <= 0 ||
			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
			 && i++ < 20);
	}

	return c2;
}
/*
 * This function has to be called at least every 64 seconds.  The bus_time
 * field stores not only the upper 25 bits of the BUS_TIME register but also
 * the most significant bit of the cycle timer in bit 6 so that we can detect
 * changes in this bit.
 */
static u32 update_bus_time(struct fw_ohci *ohci)
{
	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;

	if (unlikely(!ohci->bus_time_running)) {
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
		ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
		                 (cycle_time_seconds & 0x40);
		ohci->bus_time_running = true;
	}

	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		ohci->bus_time += 0x40;

	return ohci->bus_time | cycle_time_seconds;
}
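
/*
 * Worked example (added for illustration): cycle_time_seconds is the
 * 7-bit seconds field, so its 0x40 bit toggles every 64 seconds.  If
 * bus_time = 0x1040 and a later call sees cycle_time_seconds = 0x02
 * (bit 6 now clear), the stored-bit mismatch advances bus_time to
 * 0x1080, and the function returns 0x1080 | 0x02 = 0x1082.
 */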
static int get_status_for_port(struct fw_ohci *ohci, int port_index,
			       enum phy_packet_self_id_port_status *status)
{
	int reg;

	mutex_lock(&ohci->phy_reg_mutex);
	reg = write_phy_reg(ohci, 7, port_index);
	if (reg >= 0)
		reg = read_phy_reg(ohci, 8);
	mutex_unlock(&ohci->phy_reg_mutex);
	if (reg < 0)
		return reg;

	switch (reg & 0x0f) {
	case 0x06:
		// is child node (connected to parent node)
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_PARENT;
		break;
	case 0x0e:
		// is parent node (connected to child node)
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_CHILD;
		break;
	default:
		// not connected
		*status = PHY_PACKET_SELF_ID_PORT_STATUS_NCONN;
		break;
	}

	return 0;
}
static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
			   int self_id_count)
{
	unsigned int left_phy_id = phy_packet_self_id_get_phy_id(self_id);
	int i;

	for (i = 0; i < self_id_count; i++) {
		u32 entry = ohci->self_id_buffer[i];
		unsigned int right_phy_id = phy_packet_self_id_get_phy_id(entry);

		if (left_phy_id == right_phy_id)
			return -1;
		if (left_phy_id < right_phy_id)
			return i;
	}
	return i;
}
static bool initiated_reset(struct fw_ohci *ohci)
{
	int reg;
	int ret = false;

	mutex_lock(&ohci->phy_reg_mutex);
	reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
	if (reg >= 0) {
		reg = read_phy_reg(ohci, 8);
		reg |= 0x10;
		reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
		reg = read_phy_reg(ohci, 12); /* read register 12 */
		if (reg >= 0) {
			if ((reg & 0x08) == 0x08) {
				/* bit 3 indicates "initiated reset" */
				ret = true;
			}
		}
	}
	mutex_unlock(&ohci->phy_reg_mutex);
	return ret;
}
/*
 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
 * Construct the selfID from phy register contents.
 */
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
	int reg, i, pos, err;
	// link active 1, speed 3, bridge 0, contender 1, more packets 0.
	u32 self_id = 0;

	phy_packet_set_packet_identifier(&self_id, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID);
	phy_packet_self_id_zero_set_link_active(&self_id, true);
	phy_packet_self_id_zero_set_scode(&self_id, SCODE_800);
	phy_packet_self_id_zero_set_contender(&self_id, true);

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return -EBUSY;
	}
	phy_packet_self_id_set_phy_id(&self_id, reg & 0x3f);

	reg = ohci_read_phy_reg(&ohci->card, 4);
	if (reg < 0)
		return reg;
	phy_packet_self_id_zero_set_power_class(&self_id, reg & 0x07);

	reg = ohci_read_phy_reg(&ohci->card, 1);
	if (reg < 0)
		return reg;
	phy_packet_self_id_zero_set_gap_count(&self_id, reg & 0x3f);

	for (i = 0; i < 3; i++) {
		enum phy_packet_self_id_port_status status;

		err = get_status_for_port(ohci, i, &status);
		if (err < 0)
			return err;

		self_id_sequence_set_port_status(&self_id, 1, i, status);
	}

	phy_packet_self_id_zero_set_initiated_reset(&self_id, initiated_reset(ohci));

	pos = get_self_id_pos(ohci, self_id, self_id_count);
	if (pos >= 0) {
		memmove(&(ohci->self_id_buffer[pos+1]),
			&(ohci->self_id_buffer[pos]),
			(self_id_count - pos) * sizeof(*ohci->self_id_buffer));
		ohci->self_id_buffer[pos] = self_id;
		self_id_count++;
	}

	return self_id_count;
}
static void bus_reset_work(struct work_struct *work)
{
	struct fw_ohci *ohci =
		container_of(work, struct fw_ohci, bus_reset_work);
	int self_id_count, generation, new_generation, i, j;
	u32 reg, quadlet;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;
	bool is_new_root;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		ohci_notice(ohci, "malconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	is_new_root = (reg & OHCI1394_NodeID_root) != 0;
	if (!(ohci->is_root && is_new_root))
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	ohci->is_root = is_new_root;

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (ohci1394_self_id_count_is_error(reg)) {
		ohci_notice(ohci, "self ID receive error\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = ohci1394_self_id_count_get_size(reg) >> 1;

	if (self_id_count > 252) {
		ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
		return;
	}

	quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
	generation = ohci1394_self_id_receive_q0_get_generation(quadlet);
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		u32 id = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci));
		u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci));

		if (id != ~id2) {
			/*
			 * If the invalid data looks like a cycle start packet,
			 * it's likely to be the result of the cycle master
			 * having a wrong gap count.  In this case, the self IDs
			 * so far are valid and should be processed so that the
			 * bus manager can then correct the gap count.
			 */
			if (id == 0xffff008f) {
				ohci_notice(ohci, "ignoring spurious self IDs\n");
				self_id_count = j;
				break;
			}

			ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
				    j, self_id_count, id, id2);
			return;
		}
		ohci->self_id_buffer[j] = id;
	}

	if (ohci->quirks & QUIRK_TI_SLLZ059) {
		self_id_count = find_and_insert_self_id(ohci, self_id_count);
		if (self_id_count < 0) {
			ohci_notice(ohci,
				    "could not construct local self ID\n");
			return;
		}
	}

	if (self_id_count == 0) {
		ohci_notice(ohci, "no self IDs\n");
		return;
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	new_generation = ohci1394_self_id_count_get_generation(reg);
	if (new_generation != generation) {
		ohci_notice(ohci, "new bus reset, discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irq(&ohci->lock);

	ohci->generation = -1; /* prevent AT packet queueing */
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);

	spin_unlock_irq(&ohci->lock);

	/*
	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
	 * packets in the AT queues and software needs to drain them.
	 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
	 */
	at_context_flush(&ohci->at_request_ctx);
	at_context_flush(&ohci->at_response_ctx);

	spin_lock_irq(&ohci->lock);

	ohci->generation = generation;
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
	reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);

	if (ohci->quirks & QUIRK_RESET_PACKET)
		ohci->request_generation = generation;

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */

	if (ohci->next_config_rom != NULL) {
		if (ohci->next_config_rom != ohci->config_rom) {
			free_rom     = ohci->config_rom;
			free_rom_bus = ohci->config_rom_bus;
		}
		ohci->config_rom     = ohci->next_config_rom;
		ohci->config_rom_bus = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = ohci->next_header;
		reg_write(ohci, OHCI1394_ConfigROMhdr,
			  be32_to_cpu(ohci->next_header));
	}

	if (param_remote_dma) {
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
	}

	spin_unlock_irq(&ohci->lock);

	if (free_rom)
		dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);

	log_selfids(ohci, generation, self_id_count);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer,
				 ohci->csr_state_setclear_abdicate);
	ohci->csr_state_setclear_abdicate = false;
}

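/*
 * Editor's illustrative sketch, not part of the driver and not called
 * anywhere: the seqlock-like read pattern recommended by OHCI section 11.2,
 * as used by bus_reset_work() above, reduced to its core.  Sample the
 * generation, copy the self IDs, then re-sample; a mismatch means a new bus
 * reset overwrote the DMA buffer mid-copy and the copy must be discarded.
 */
static int __maybe_unused self_id_read_consistent(struct fw_ohci *ohci,
						  u32 *dst, int quadlets)
{
	int generation, new_generation, i;

	generation = ohci1394_self_id_count_get_generation(
			reg_read(ohci, OHCI1394_SelfIDCount));
	rmb();	/* sample the generation before reading the buffer */

	for (i = 0; i < quadlets; i++)
		dst[i] = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci));

	rmb();	/* finish reading the buffer before re-sampling */
	new_generation = ohci1394_self_id_count_get_generation(
			reg_read(ohci, OHCI1394_SelfIDCount));

	return new_generation == generation ? generation : -EAGAIN;
}
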
static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	/*
	 * busReset and postedWriteErr events must not be cleared yet
	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
	 */
	reg_write(ohci, OHCI1394_IntEventClear,
		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
	trace_irqs(ohci->card.index, event);
	log_irqs(ohci, event);
	// The flag is masked again at bus_reset_work() scheduled by selfID event.
	if (event & OHCI1394_busReset)
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

	if (event & OHCI1394_selfIDComplete) {
		if (trace_self_id_complete_enabled()) {
			u32 reg = reg_read(ohci, OHCI1394_SelfIDCount);

			trace_self_id_complete(ohci->card.index, reg, ohci->self_id,
					       has_be_header_quirk(ohci));
		}
		queue_work(selfid_workqueue, &ohci->bus_reset_work);
	}

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	if (event & OHCI1394_isochRx) {
		iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

		while (iso_event) {
			i = ffs(iso_event) - 1;
			tasklet_schedule(
				&ohci->ir_context_list[i].context.tasklet);
			iso_event &= ~(1 << i);
		}
	}

	if (event & OHCI1394_isochTx) {
		iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

		while (iso_event) {
			i = ffs(iso_event) - 1;
			tasklet_schedule(
				&ohci->it_context_list[i].context.tasklet);
			iso_event &= ~(1 << i);
		}
	}

	if (unlikely(event & OHCI1394_regAccessFail))
		ohci_err(ohci, "register access failure\n");

	if (unlikely(event & OHCI1394_postedWriteErr)) {
		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
		reg_read(ohci, OHCI1394_PostedWriteAddressLo);
		reg_write(ohci, OHCI1394_IntEventClear,
			  OHCI1394_postedWriteErr);
		if (printk_ratelimit())
			ohci_err(ohci, "PCI posted write error\n");
	}

	if (unlikely(event & OHCI1394_cycleTooLong)) {
		if (printk_ratelimit())
			ohci_notice(ohci, "isochronous cycle too long\n");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	}

	if (unlikely(event & OHCI1394_cycleInconsistent)) {
		/*
		 * We need to clear this event bit in order to make
		 * cycleMatch isochronous I/O work.  In theory we should
		 * stop active cycleMatch iso contexts now and restart
		 * them at least two cycles later.  (FIXME?)
		 */
		if (printk_ratelimit())
			ohci_notice(ohci, "isochronous cycle inconsistent\n");
	}

	if (unlikely(event & OHCI1394_unrecoverableError))
		handle_dead_contexts(ohci);

	if (event & OHCI1394_cycle64Seconds) {
		spin_lock(&ohci->lock);
		update_bus_time(ohci);
		spin_unlock(&ohci->lock);
	} else
		flush_writes(ohci);

	return IRQ_HANDLED;
}

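/*
 * Editor's illustrative sketch, not called anywhere: the liveness test at
 * the top of irq_handler() above.  All zeroes means the interrupt was not
 * ours; all ones is what MMIO reads return from a dead or ejected card, so
 * neither value is worth processing.
 */
static bool __maybe_unused int_event_is_usable(u32 event)
{
	return event != 0 && event != ~0u;	/* same test as !event || !~event */
}
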
static int software_reset(struct fw_ohci *ohci)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
	for (i = 0; i < 500; i++) {
		val = reg_read(ohci, OHCI1394_HCControlSet);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_HCControl_softReset))
			return 0;

		msleep(1);
	}

	return -EBUSY;
}

static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
{
	size_t size = length * 4;

	memcpy(dest, src, size);
	if (size < CONFIG_ROM_SIZE)
		memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}

static int configure_1394a_enhancements(struct fw_ohci *ohci)
{
	bool enable_1394a;
	int ret, clear, set, offset;

	/* Check if the driver should configure link and PHY. */
	if (!(reg_read(ohci, OHCI1394_HCControlSet) &
	      OHCI1394_HCControl_programPhyEnable))
		return 0;

	/* Paranoia: check whether the PHY supports 1394a, too. */
	enable_1394a = false;
	ret = read_phy_reg(ohci, 2);
	if (ret < 0)
		return ret;
	if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
		ret = read_paged_phy_reg(ohci, 1, 8);
		if (ret < 0)
			return ret;
		if (ret >= 1)
			enable_1394a = true;
	}

	if (ohci->quirks & QUIRK_NO_1394A)
		enable_1394a = false;

	/* Configure PHY and link consistently. */
	if (enable_1394a) {
		clear = 0;
		set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
	} else {
		clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
		set = 0;
	}
	ret = update_phy_reg(ohci, 5, clear, set);
	if (ret < 0)
		return ret;

	if (enable_1394a)
		offset = OHCI1394_HCControlSet;
	else
		offset = OHCI1394_HCControlClear;
	reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);

	/* Clean up: configuration has been taken care of. */
	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_programPhyEnable);

	return 0;
}

static int probe_tsb41ba3d(struct fw_ohci *ohci)
{
	/* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
	static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
	int reg, i;

	reg = read_phy_reg(ohci, 2);
	if (reg < 0)
		return reg;
	if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
		return 0;

	for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
		reg = read_paged_phy_reg(ohci, 1, i + 10);
		if (reg < 0)
			return reg;
		if (reg != id[i])
			return 0;
	}
	return 1;
}

static int ohci_enable(struct fw_card *card,
		       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 lps, version, irqs;
	int i, ret;

	ret = software_reset(ohci);
	if (ret < 0) {
		ohci_err(ohci, "failed to reset ohci card\n");
		return ret;
	}

	/*
	 * Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  However, with some cards (well, at least
	 * a JMicron PCIe card), we have to try again sometimes.
	 *
	 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
	 * cannot actually use the phy at that time.  These need tens of
	 * milliseconds pause between LPS write and first phy access too.
	 */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);

	for (lps = 0, i = 0; !lps && i < 3; i++) {
		msleep(50);
		lps = reg_read(ohci, OHCI1394_HCControlSet) &
		      OHCI1394_HCControl_LPS;
	}

	if (!lps) {
		ohci_err(ohci, "failed to set Link Power Status\n");
		return -EIO;
	}

	if (ohci->quirks & QUIRK_TI_SLLZ059) {
		ret = probe_tsb41ba3d(ohci);
		if (ret < 0)
			return ret;
		if (ret)
			ohci_notice(ohci, "local TSB41BA3D phy\n");
		else
			ohci->quirks &= ~QUIRK_TI_SLLZ059;
	}

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
		  (200 << 16));

	ohci->bus_time_running = false;

	for (i = 0; i < 32; i++)
		if (ohci->ir_context_support & (1 << i))
			reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
				  IR_CONTEXT_MULTI_CHANNEL_MODE);

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	if (version >= OHCI_VERSION_1_1) {
		reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
			  0xfffffffe);
		card->broadcast_channel_auto_allocated = true;
	}

	/* Get implemented bits of the priority arbitration request counter. */
	reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
	ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
	reg_write(ohci, OHCI1394_FairnessControl, 0);
	card->priority_budget_implemented = ohci->pri_req_max != 0;

	reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);

	ret = configure_1394a_enhancements(ohci);
	if (ret < 0)
		return ret;

	/* Activate link_on bit and contender bit in our self ID packets. */
	ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
	if (ret < 0)
		return ret;

	/*
	 * When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled.  This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in the bus reset tasklet.
	 */

	if (config_rom) {
		ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
							    &ohci->next_config_rom_bus, GFP_KERNEL);
		if (ohci->next_config_rom == NULL)
			return -ENOMEM;

		copy_config_rom(ohci->next_config_rom, config_rom, length);
	} else {
		/*
		 * In the suspend case, config_rom is NULL, which
		 * means that we just reuse the old config rom.
		 */
		ohci->next_config_rom = ohci->config_rom;
		ohci->next_config_rom_bus = ohci->config_rom_bus;
	}

	ohci->next_header = ohci->next_config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  be32_to_cpu(ohci->next_config_rom[2]));
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	irqs =	OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		OHCI1394_RQPkt | OHCI1394_RSPkt |
		OHCI1394_isochTx | OHCI1394_isochRx |
		OHCI1394_postedWriteErr |
		OHCI1394_selfIDComplete |
		OHCI1394_regAccessFail |
		OHCI1394_cycleInconsistent |
		OHCI1394_unrecoverableError |
		OHCI1394_cycleTooLong |
		OHCI1394_masterIntEnable |
		OHCI1394_cycle64Seconds;
	reg_write(ohci, OHCI1394_IntMaskSet, irqs);

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);

	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_rcvPhyPkt);

	ar_context_run(&ohci->ar_request_ctx);
	ar_context_run(&ohci->ar_response_ctx);

	flush_writes(ohci);

	/* We are ready to go, reset bus to finish initialization. */
	fw_schedule_bus_reset(&ohci->card, false, true);

	return 0;
}

static int ohci_set_config_rom(struct fw_card *card,
			       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register. All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_work).
	 */

	next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
					      &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irq(&ohci->lock);

	/*
	 * If there is not an already pending config_rom update,
	 * push our new allocation into the ohci->next_config_rom
	 * and then mark the local variable as null so that we
	 * won't deallocate the new buffer.
	 *
	 * OTOH, if there is a pending config_rom update, just
	 * use that buffer with the new config_rom data, and
	 * let this routine free the unused DMA allocation.
	 */

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;
		next_config_rom = NULL;
	}

	copy_config_rom(ohci->next_config_rom, config_rom, length);

	ohci->next_header = config_rom[0];
	ohci->next_config_rom[0] = 0;

	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	spin_unlock_irq(&ohci->lock);

	/* If we didn't use the DMA allocation, delete it. */
	if (next_config_rom != NULL) {
		dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom,
				   next_config_rom_bus);
	}

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */

	fw_schedule_bus_reset(&ohci->card, true, true);

	return 0;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int ret = -ENOENT;

	tasklet_disable_in_atomic(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;

	// Timestamping on behalf of the hardware.
	packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));

	packet->callback(packet, &ohci->card, packet->ack);
	ret = 0;
 out:
	tasklet_enable(&ctx->tasklet);

	return ret;
}

static int ohci_enable_phys_dma(struct fw_card *card,
				int node_id, int generation)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, ret = 0;

	if (param_remote_dma)
		return 0;

	/*
	 * FIXME: Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
	 */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		ret = -ESTALE;
		goto out;
	}

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ret;
}

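/*
 * Editor's illustrative sketch, not called anywhere: the node-to-filter-bit
 * mapping used by ohci_enable_phys_dma() above.  A local-bus node number
 * selects one bit of the 64-bit PhyReqFilter{Lo,Hi} register pair; a node ID
 * carrying a non-local bus ID maps to bit 63, which enables physical DMA for
 * all nodes on remote buses.
 */
static u64 __maybe_unused phys_dma_filter_bit(int node_id)
{
	int n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;

	return 1ULL << n;
}
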
static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	u32 value;

	switch (csr_offset) {
	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
		if (ohci->is_root &&
		    (reg_read(ohci, OHCI1394_LinkControlSet) &
		     OHCI1394_LinkControl_cycleMaster))
			value = CSR_STATE_BIT_CMSTR;
		else
			value = 0;
		if (ohci->csr_state_setclear_abdicate)
			value |= CSR_STATE_BIT_ABDICATE;

		return value;

	case CSR_NODE_IDS:
		return reg_read(ohci, OHCI1394_NodeID) << 16;

	case CSR_CYCLE_TIME:
		return get_cycle_time(ohci);

	case CSR_BUS_TIME:
		/*
		 * We might be called just after the cycle timer has wrapped
		 * around but just before the cycle64Seconds handler, so we
		 * better check here, too, if the bus time needs to be updated.
		 */
		spin_lock_irqsave(&ohci->lock, flags);
		value = update_bus_time(ohci);
		spin_unlock_irqrestore(&ohci->lock, flags);
		return value;

	case CSR_BUSY_TIMEOUT:
		value = reg_read(ohci, OHCI1394_ATRetries);
		return (value >> 4) & 0x0ffff00f;

	case CSR_PRIORITY_BUDGET:
		return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
			(ohci->pri_req_max << 8);

	default:
		WARN_ON(1);
		return 0;
	}
}

static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;

	switch (csr_offset) {
	case CSR_STATE_CLEAR:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = false;
		break;

	case CSR_STATE_SET:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlSet,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = true;
		break;

	case CSR_NODE_IDS:
		reg_write(ohci, OHCI1394_NodeID, value >> 16);
		flush_writes(ohci);
		break;

	case CSR_CYCLE_TIME:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
		reg_write(ohci, OHCI1394_IntEventSet,
			  OHCI1394_cycleInconsistent);
		flush_writes(ohci);
		break;

	case CSR_BUS_TIME:
		spin_lock_irqsave(&ohci->lock, flags);
		ohci->bus_time = (update_bus_time(ohci) & 0x40) |
				 (value & ~0x7f);
		spin_unlock_irqrestore(&ohci->lock, flags);
		break;

	case CSR_BUSY_TIMEOUT:
		value = (value & 0xf) | ((value & 0xf) << 4) |
			((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
		reg_write(ohci, OHCI1394_ATRetries, value);
		flush_writes(ohci);
		break;

	case CSR_PRIORITY_BUDGET:
		reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
		flush_writes(ohci);
		break;

	default:
		WARN_ON(1);
		break;
	}
}

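/*
 * Editor's illustrative sketch, not called anywhere: the CSR BUSY_TIMEOUT
 * to ATRetries mapping used by ohci_write_csr() above.  The 4-bit retry
 * limit of the CSR register is replicated into all three retry fields of
 * OHCI1394_ATRetries, and the cycle limit moves up by four bits;
 * ohci_read_csr() reverses this with (value >> 4) & 0x0ffff00f.
 */
static u32 __maybe_unused csr_busy_timeout_to_at_retries(u32 csr)
{
	u32 retries = csr & 0xf;		/* retry limit, 4 bits */
	u32 cycle_limit = csr & 0x0ffff000;	/* cycle limit, bits 12..27 */

	return retries | (retries << 4) | (retries << 8) | (cycle_limit << 4);
}
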
static void flush_iso_completions(struct iso_context *ctx, enum fw_iso_context_completions_cause cause)
{
	trace_isoc_inbound_single_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
					      ctx->header_length);
	trace_isoc_outbound_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
					ctx->header_length);

	ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
			      ctx->header_length, ctx->header,
			      ctx->base.callback_data);
	ctx->header_length = 0;
}

static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
{
	u32 *ctx_hdr;

	if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
		if (ctx->base.drop_overflow_headers)
			return;
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
	}

	ctx_hdr = ctx->header + ctx->header_length;
	ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);

	/*
	 * The two iso header quadlets are byteswapped to little
	 * endian by the controller, but we want to present them
	 * as big endian for consistency with the bus endianness.
	 */
	if (ctx->base.header_size > 0)
		ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
	if (ctx->base.header_size > 4)
		ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
	if (ctx->base.header_size > 8)
		memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
	ctx->header_length += ctx->base.header_size;
}

static int handle_ir_packet_per_buffer(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	u32 buffer_dma;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
		d++;
		buffer_dma = le32_to_cpu(d->data_address);
		dma_sync_single_range_for_cpu(context->ohci->card.device,
					      buffer_dma & PAGE_MASK,
					      buffer_dma & ~PAGE_MASK,
					      le16_to_cpu(d->req_count),
					      DMA_FROM_DEVICE);
	}

	copy_iso_headers(ctx, (u32 *) (last + 1));

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);

	return 1;
}

/* d == last because each descriptor block is only a single descriptor. */
static int handle_ir_buffer_fill(struct context *context,
				 struct descriptor *d,
				 struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	unsigned int req_count, res_count, completed;
	u32 buffer_dma;

	req_count = le16_to_cpu(last->req_count);
	res_count = le16_to_cpu(READ_ONCE(last->res_count));
	completed = req_count - res_count;
	buffer_dma = le32_to_cpu(last->data_address);

	if (completed > 0) {
		ctx->mc_buffer_bus = buffer_dma;
		ctx->mc_completed = completed;
	}

	if (res_count != 0)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	dma_sync_single_range_for_cpu(context->ohci->card.device,
				      buffer_dma & PAGE_MASK,
				      buffer_dma & ~PAGE_MASK,
				      completed, DMA_FROM_DEVICE);

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
		trace_isoc_inbound_multiple_completions(&ctx->base, completed,
							FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);

		ctx->base.callback.mc(&ctx->base,
				      buffer_dma + completed,
				      ctx->base.callback_data);
		ctx->mc_completed = 0;
	}

	return 1;
}

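/*
 * Editor's illustrative sketch, not called anywhere: the progress arithmetic
 * in handle_ir_buffer_fill() above.  The controller decrements res_count as
 * it fills the buffer, so req_count - res_count is the number of bytes
 * received so far, and the descriptor is complete once res_count reaches
 * zero.
 */
static unsigned int __maybe_unused ir_fill_bytes_completed(const struct descriptor *d)
{
	return le16_to_cpu(d->req_count) - le16_to_cpu(READ_ONCE(d->res_count));
}
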
static void flush_ir_buffer_fill(struct iso_context *ctx)
{
	dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
				      ctx->mc_buffer_bus & PAGE_MASK,
				      ctx->mc_buffer_bus & ~PAGE_MASK,
				      ctx->mc_completed, DMA_FROM_DEVICE);

	trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc_completed,
						FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);

	ctx->base.callback.mc(&ctx->base,
			      ctx->mc_buffer_bus + ctx->mc_completed,
			      ctx->base.callback_data);
	ctx->mc_completed = 0;
}

static inline void sync_it_packet_for_cpu(struct context *context,
					  struct descriptor *pd)
{
	__le16 control;
	u32 buffer_dma;

	/* only packets beginning with OUTPUT_MORE* have data buffers */
	if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return;

	/* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
	pd += 2;

	/*
	 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
	 * data buffer is in the context program's coherent page and must not
	 * be synced.
	 */
	if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
	    (context->current_bus & PAGE_MASK)) {
		if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
			return;
		pd++;
	}

	do {
		buffer_dma = le32_to_cpu(pd->data_address);
		dma_sync_single_range_for_cpu(context->ohci->card.device,
					      buffer_dma & PAGE_MASK,
					      buffer_dma & ~PAGE_MASK,
					      le16_to_cpu(pd->req_count),
					      DMA_TO_DEVICE);
		control = pd->control;
		pd++;
	} while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
}

static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	__be32 *ctx_hdr;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	sync_it_packet_for_cpu(context, d);

	if (ctx->header_length + 4 > PAGE_SIZE) {
		if (ctx->base.drop_overflow_headers)
			return 1;
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
	}

	ctx_hdr = ctx->header + ctx->header_length;
	ctx->last_timestamp = le16_to_cpu(last->res_count);
	/* Present this value as big-endian to match the receive code */
	*ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
			       le16_to_cpu(pd->res_count));
	ctx->header_length += 4;

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
		flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);

	return 1;
}

static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
{
	u32 hi = channels >> 32, lo = channels;

	reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
	reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
	ohci->mc_channels = channels;
}

static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx;
	descriptor_callback_t callback;
	u64 *channels;
	u32 *mask, regs;
	int index, ret = -EBUSY;

	spin_lock_irq(&ohci->lock);

	switch (type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		mask     = &ohci->it_context_mask;
		callback = handle_it_packet;
		index    = ffs(*mask) - 1;
		if (index >= 0) {
			*mask &= ~(1 << index);
			regs = OHCI1394_IsoXmitContextBase(index);
			ctx  = &ohci->it_context_list[index];
		}
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		channels = &ohci->ir_context_channels;
		mask     = &ohci->ir_context_mask;
		callback = handle_ir_packet_per_buffer;
		index    = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
		if (index >= 0) {
			*channels &= ~(1ULL << channel);
			*mask     &= ~(1 << index);
			regs = OHCI1394_IsoRcvContextBase(index);
			ctx  = &ohci->ir_context_list[index];
		}
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		mask     = &ohci->ir_context_mask;
		callback = handle_ir_buffer_fill;
		index    = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
		if (index >= 0) {
			ohci->mc_allocated = true;
			*mask &= ~(1 << index);
			regs = OHCI1394_IsoRcvContextBase(index);
			ctx  = &ohci->ir_context_list[index];
		}
		break;

	default:
		index = -1;
		ret = -ENOSYS;
	}

	spin_unlock_irq(&ohci->lock);

	if (index < 0)
		return ERR_PTR(ret);

	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = context_init(&ctx->context, ohci, regs, callback);
	if (ret < 0)
		goto out_with_header;

	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
		set_multichannel_mask(ohci, 0);
		ctx->mc_completed = 0;
	}

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irq(&ohci->lock);

	switch (type) {
	case FW_ISO_CONTEXT_RECEIVE:
		*channels |= 1ULL << channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ohci->mc_allocated = false;
		break;
	}
	*mask |= 1 << index;

	spin_unlock_irq(&ohci->lock);

	return ERR_PTR(ret);
}

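/*
 * Editor's illustrative sketch, not called anywhere: the bitmask allocator
 * pattern used by ohci_allocate_iso_context() above.  Each free context is
 * a set bit; ffs() picks the lowest free index, and the bit is cleared to
 * claim it.  ohci_free_iso_context() releases a context by setting the bit
 * again.  Callers must hold ohci->lock, as the switch statement above does.
 */
static int __maybe_unused claim_lowest_free_context(u32 *mask)
{
	int index = ffs(*mask) - 1;	/* -1 when no bits are set */

	if (index >= 0)
		*mask &= ~(1 << index);

	return index;
}
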
static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
	int index;

	/* the controller cannot start without any queued packets */
	if (ctx->context.last->branch_address == 0)
		return -ENODATA;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
		fallthrough;
	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);

		ctx->sync = sync;
		ctx->tags = tags;

		break;
	}

	return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
		break;

	case FW_ISO_CONTEXT_RECEIVE:
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
		break;
	}
	flush_writes(ohci);
	context_stop(&ctx->context);
	tasklet_kill(&ctx->context.tasklet);

	return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= 1ULL << base->channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= ohci->mc_channels;
		ohci->mc_channels = 0;
		ohci->mc_allocated = false;
		break;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}

static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	unsigned long flags;
	int ret;

	switch (base->type) {
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		spin_lock_irqsave(&ohci->lock, flags);

		/* Don't allow multichannel to grab other contexts' channels. */
		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
			*channels = ohci->ir_context_channels;
			ret = -EBUSY;
		} else {
			set_multichannel_mask(ohci, *channels);
			ret = 0;
		}

		spin_unlock_irqrestore(&ohci->lock, flags);

		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

#ifdef CONFIG_PM
static void ohci_resume_iso_dma(struct fw_ohci *ohci)
{
	int i;
	struct iso_context *ctx;

	for (i = 0 ; i < ohci->n_ir ; i++) {
		ctx = &ohci->ir_context_list[i];
		if (ctx->context.running)
			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
	}

	for (i = 0 ; i < ohci->n_it ; i++) {
		ctx = &ohci->it_context_list[i];
		if (ctx->context.running)
			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
	}
}
#endif

static int queue_iso_transmit(struct iso_context *ctx,
			      struct fw_iso_packet *packet,
			      struct fw_iso_buffer *buffer,
			      unsigned long payload)
{
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);
		/*
		 * Link the skip address to this descriptor itself.  This causes
		 * a context to skip a cycle whenever lost cycles or FIFO
		 * overruns occur, without dropping the data.  The application
		 * should then decide whether this is an error condition or not.
		 * FIXME:  Make the context's cycle-lost behaviour configurable?
		 */
		d[0].branch_address = cpu_to_le32(d_bus | z);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count    = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page            = payload_index >> PAGE_SHIFT;
		offset          = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length          =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
						 page_bus, offset, length,
						 DMA_TO_DEVICE);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}

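/*
 * Editor's illustrative sketch, not called anywhere: the descriptor-count
 * bookkeeping at the top of queue_iso_transmit() above.  A packet needs two
 * descriptors for the immediate iso header (one when skipping), one more
 * when a user-supplied header is present, and one per page spanned by the
 * payload.
 */
static u32 __maybe_unused it_descriptor_count(const struct fw_iso_packet *p,
					      unsigned long payload_index)
{
	u32 z = p->skip ? 1 : 2;
	u32 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;

	if (p->header_length > 0)
		z++;
	if (p->payload_length > 0)
		z += end_page - (payload_index >> PAGE_SHIFT);

	return z;
}
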
static int queue_iso_packet_per_buffer(struct iso_context *ctx,
				       struct fw_iso_packet *packet,
				       struct fw_iso_buffer *buffer,
				       unsigned long payload)
{
	struct device *device = ctx->context.ohci->card.device;
	struct descriptor *d, *pd;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = packet->header_length / ctx->base.header_size;
	header_size  = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page     = payload >> PAGE_SHIFT;
	offset   = payload & ~PAGE_MASK;
	payload_per_buffer = packet->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
				z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
					      DESCRIPTOR_INPUT_MORE);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count    = cpu_to_le16(header_size);
		d->res_count    = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		pd = d;
		for (j = 1; j < z; j++) {
			pd++;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			dma_sync_single_range_for_device(device, page_bus,
							 offset, length,
							 DMA_FROM_DEVICE);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}

static int queue_iso_buffer_fill(struct iso_context *ctx,
				 struct fw_iso_packet *packet,
				 struct fw_iso_buffer *buffer,
				 unsigned long payload)
{
	struct descriptor *d;
	dma_addr_t d_bus, page_bus;
	int page, offset, rest, z, i, length;

	page   = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest   = packet->payload_length;

	/* We need one descriptor for each page in the buffer. */
	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);

	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
		return -EFAULT;

	for (i = 0; i < z; i++) {
		d = context_get_descriptors(&ctx->context, 1, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		if (packet->interrupt && i == z - 1)
			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;
		d->req_count = cpu_to_le16(length);
		d->res_count = d->req_count;
		d->transfer_status = 0;

		page_bus = page_private(buffer->pages[page]);
		d->data_address = cpu_to_le32(page_bus + offset);

		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
						 page_bus, offset, length,
						 DMA_FROM_DEVICE);

		rest -= length;
		offset = 0;
		page++;

		context_append(&ctx->context, d, 1, 0);
	}

	return 0;
}

static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret = -ENOSYS;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		ret = queue_iso_transmit(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE:
		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
		break;
	}
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}

static void ohci_flush_queue_iso(struct fw_iso_context *base)
{
	struct context *ctx =
			&container_of(base, struct iso_context, base)->context;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static int ohci_flush_iso_completions(struct fw_iso_context *base)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int ret = 0;

	tasklet_disable_in_atomic(&ctx->context.tasklet);

	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
		context_tasklet((unsigned long)&ctx->context);

		switch (base->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
		case FW_ISO_CONTEXT_RECEIVE:
			if (ctx->header_length != 0)
				flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
			break;
		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (ctx->mc_completed != 0)
				flush_ir_buffer_fill(ctx);
			break;
		default:
			ret = -ENOSYS;
		}

		clear_bit_unlock(0, &ctx->flushing_completions);
		smp_mb__after_atomic();
	}

	tasklet_enable(&ctx->context.tasklet);

	return ret;
}

static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.read_phy_reg		= ohci_read_phy_reg,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.read_csr		= ohci_read_csr,
	.write_csr		= ohci_write_csr,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.set_iso_channels	= ohci_set_iso_channels,
	.queue_iso		= ohci_queue_iso,
	.flush_queue_iso	= ohci_flush_queue_iso,
	.flush_iso_completions	= ohci_flush_iso_completions,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void pmac_ohci_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */

static void release_ohci(struct device *dev, void *data)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct fw_ohci *ohci = pci_get_drvdata(pdev);

	pmac_ohci_off(pdev);

	ar_context_release(&ohci->ar_response_ctx);
	ar_context_release(&ohci->ar_request_ctx);

	dev_notice(dev, "removed fw-ohci device\n");
}

static int pci_probe(struct pci_dev *dev,
		     const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int i, flags, irq, err;
	size_t size;

	if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
		dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
		return -ENOSYS;
	}

	ohci = devres_alloc(release_ohci, sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL)
		return -ENOMEM;
	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
	pci_set_drvdata(dev, ohci);
	pmac_ohci_on(dev);
	devres_add(&dev->dev, ohci);

	err = pcim_enable_device(dev);
	if (err) {
		dev_err(&dev->dev, "failed to enable OHCI hardware\n");
		return err;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	spin_lock_init(&ohci->lock);
	mutex_init(&ohci->phy_reg_mutex);

	INIT_WORK(&ohci->bus_reset_work, bus_reset_work);

	if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
		ohci_err(ohci, "invalid MMIO resource\n");
		return -ENXIO;
	}

	err = pcim_iomap_regions(dev, 1 << 0, ohci_driver_name);
	if (err) {
		ohci_err(ohci, "request and map MMIO resource unavailable\n");
		return -ENXIO;
	}
	ohci->registers = pcim_iomap_table(dev)[0];

	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
		if ((ohci_quirks[i].vendor == dev->vendor) &&
		    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].device == dev->device) &&
		    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].revision >= dev->revision)) {
			ohci->quirks = ohci_quirks[i].flags;
			break;
		}
	if (param_quirks)
		ohci->quirks = param_quirks;

	if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
		ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;

	/*
	 * Because dma_alloc_coherent() allocates at least one page,
	 * we save space by using a common buffer for the AR request/
	 * response descriptors and the self IDs buffer.
	 */
	BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
	BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
	ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus,
						GFP_KERNEL);
	if (!ohci->misc_buffer)
		return -ENOMEM;

	err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
			      OHCI1394_AsReqRcvContextControlSet);
	if (err < 0)
		return err;

	err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
			      OHCI1394_AsRspRcvContextControlSet);
	if (err < 0)
		return err;

	err = context_init(&ohci->at_request_ctx, ohci,
			   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
	if (err < 0)
		return err;

	err = context_init(&ohci->at_response_ctx, ohci,
			   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
	if (err < 0)
		return err;

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	ohci->ir_context_mask = ohci->ir_context_support;
	ohci->n_ir = hweight32(ohci->ir_context_mask);
	size = sizeof(struct iso_context) * ohci->n_ir;
	ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
	if (!ohci->ir_context_list)
		return -ENOMEM;

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	/* JMicron JMB38x often shows 0 at first read, just ignore it */
	if (!ohci->it_context_support) {
		ohci_notice(ohci, "overriding IsoXmitIntMask\n");
		ohci->it_context_support = 0xf;
	}
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	ohci->it_context_mask = ohci->it_context_support;
	ohci->n_it = hweight32(ohci->it_context_mask);
	size = sizeof(struct iso_context) * ohci->n_it;
	ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
	if (!ohci->it_context_list)
		return -ENOMEM;

	ohci->self_id     = ohci->misc_buffer     + PAGE_SIZE/2;
	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	flags = PCI_IRQ_INTX;
	if (!(ohci->quirks & QUIRK_NO_MSI))
		flags |= PCI_IRQ_MSI;
	err = pci_alloc_irq_vectors(dev, 1, 1, flags);
	if (err < 0)
		return err;
	irq = pci_irq_vector(dev, 0);
	if (irq < 0) {
		err = irq;
		goto fail_msi;
	}

	err = request_threaded_irq(irq, irq_handler, NULL,
				   pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
				   ohci);
	if (err < 0) {
		ohci_err(ohci, "failed to allocate interrupt %d\n", irq);
		goto fail_msi;
	}

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err)
		goto fail_irq;

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	ohci_notice(ohci,
		    "added OHCI v%x.%x device as card %d, "
		    "%d IR + %d IT contexts, quirks 0x%x%s\n",
		    version >> 16, version & 0xff, ohci->card.index,
		    ohci->n_ir, ohci->n_it, ohci->quirks,
		    reg_read(ohci, OHCI1394_PhyUpperBound) ?
			", physUB" : "");

	return 0;

 fail_irq:
	free_irq(irq, ohci);
 fail_msi:
	pci_free_irq_vectors(dev);

	return err;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int irq;

	/*
	 * If the removal is happening from the suspend state, LPS won't be
	 * enabled and host registers (eg., IntMaskClear) won't be accessible.
	 */
	if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
		reg_write(ohci, OHCI1394_IntMaskClear, ~0);
		flush_writes(ohci);
	}
	cancel_work_sync(&ohci->bus_reset_work);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);

	irq = pci_irq_vector(dev, 0);
	if (irq >= 0)
		free_irq(irq, ohci);
	pci_free_irq_vectors(dev);

	dev_notice(&dev->dev, "removing fw-ohci device\n");
}

#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	err = pci_save_state(dev);
	if (err) {
		ohci_err(ohci, "pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
	pmac_ohci_off(dev);

	return 0;
}

static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	pmac_ohci_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		ohci_err(ohci, "pci_enable_device failed\n");
		return err;
	}

	/* Some systems don't setup GUID register on resume from ram */
	if (!reg_read(ohci, OHCI1394_GUIDLo) &&
	    !reg_read(ohci, OHCI1394_GUIDHi)) {
		reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
		reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
	}

	err = ohci_enable(&ohci->card, NULL, 0);
	if (err)
		return err;

	ohci_resume_iso_dma(ohci);

	return 0;
}
#endif

static const struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

static int __init fw_ohci_init(void)
{
	selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
	if (!selfid_workqueue)
		return -ENOMEM;

	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
	destroy_workqueue(selfid_workqueue);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
MODULE_ALIAS("ohci1394");