/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <asm/page.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "fw-ohci.h"
#include "fw-transaction.h"
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)
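/*
 * These constants assemble the 16-bit control word of an OHCI DMA
 * descriptor: bits 15..12 select the command (OUTPUT_MORE, OUTPUT_LAST,
 * INPUT_MORE, INPUT_LAST), bit 11 requests xferStatus/resCount
 * writeback, bits 10..8 are the key field (2 = immediate data follows
 * in the descriptor block), bits 5..4 are interrupt control, bits 3..2
 * branch control and bits 1..0 wait control, following the OHCI 1.1
 * descriptor layout.
 */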
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;
	__le32 reserved0;
	__le32 first_buffer;
	__le32 second_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};
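/*
 * Each AR (asynchronous receive) buffer is one page: the INPUT_MORE
 * descriptor that the controller executes sits at the start of the
 * page, and received quadlets land in data[] right behind it.  Pages
 * are chained through descriptor.branch_address (with Z = 1 encoded in
 * the low bits, see ar_context_add_page()), forming the list that the
 * AR context walks.
 */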
struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};
struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);
/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};
struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program.  It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};
#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
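/*
 * IT_HEADER_* build the two quadlets of immediate header that precede
 * each transmitted isochronous packet: sy, tcode, channel number and
 * tag go into the first quadlet, along with the speed in bits 18..16
 * (which the controller consumes rather than puts on the wire), and
 * the payload length goes into the upper half of the second quadlet.
 */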
struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;
};
#define CONFIG_ROM_SIZE 1024
struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	u32 bus_seconds;

	bool use_dualbuffer;
	bool old_uninorth;
	bool bus_reset_packet_quirk;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}
#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
static char ohci_driver_name[] = KBUILD_MODNAME;
#ifdef CONFIG_FIREWIRE_OHCI_DEBUG

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");
static void log_irqs(u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
	    evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
	    evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
	    evt & OHCI1394_isochRx		? " IR"			: "",
	    evt & OHCI1394_isochTx		? " IT"			: "",
	    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
	    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
	    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
	    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
	    evt & OHCI1394_busReset		? " busReset"		: "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
						? " ?"			: "");
}
static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
	[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };
static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}
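/*
 * Each port status field in a self-ID packet is two bits wide:
 * 0 = port not present ('.'), 1 = not connected ('-'), 2 = connected
 * to parent ('p'), 3 = connected to child ('c').  _p() extracts one
 * such field and maps it through port[] above.
 */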
static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
		  self_id_count, generation, node_id);

	for (; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
			    "%s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
}
static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};
static const char *phys[] = {
	[0x0] = "phy config packet",	[0x1] = "link-on packet",
	[0x2] = "self-id packet",	[0x3] = "-reserved-",
};
static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
{
	int tcode = header[0] >> 4 & 0xf;
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		fw_notify("A%c evt_bus_reset, generation %d\n",
		    dir, (header[2] >> 16) & 0xff);
		return;
	}

	if (header[0] == ~header[1]) {
		fw_notify("A%c %s, %s, %08x\n",
		    dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
		return;
	}

	switch (tcode) {
	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case 0xe: case 0xa:
		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
		break;
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s, %04x%08x%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], header[1] & 0xffff, header[2], specific);
		break;
	default:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], specific);
	}
}
#else

#define log_irqs(evt)
#define log_selfids(node_id, generation, self_id_count, sid)
#define log_ar_at_event(dir, speed, header, evt)

#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}
static int
ohci_update_phy_reg(struct fw_card *card, int addr,
		    int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	flush_writes(ohci);
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}
static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t uninitialized_var(ab_bus);
	size_t offset;

	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab->next = NULL;
	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						    DESCRIPTOR_STATUS |
						    DESCRIPTOR_BRANCH_ALWAYS);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}
static void ar_context_release(struct ar_context *ctx)
{
	struct ar_buffer *ab, *ab_next;
	size_t offset;
	dma_addr_t ab_bus;

	for (ab = ctx->current_buffer; ab; ab = ab_next) {
		ab_next = ab->next;
		offset = offsetof(struct ar_buffer, data);
		ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
		dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  ab, ab_bus);
	}
}
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
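/*
 * Quirk note: the FireWire cell of old Apple UniNorth bridges does not
 * byteswap incoming DMA data, so on big-endian 32-bit PowerMacs the
 * self-ID and AR buffer quadlets already arrive in host byte order and
 * must be taken as-is instead of being le32-swapped.
 */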
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		/* FIXME: Stop context, discard everything, and restart? */
		p.header_length = 0;
		p.payload_length = 0;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt    = (status >> 16) & 0x1f;

	p.ack        = evt - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event('R', p.speed, p.header, evt);

	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_tasklet).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!ohci->bus_reset_packet_quirk)
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}
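/*
 * Note on the ack extraction above: AR status events 0x11..0x1e are
 * the IEEE 1394 ack codes offset by 0x10 (see the evts[] table), so
 * "evt - 16" recovers the raw ack value that the transaction core
 * expects in fw_packet.ack.
 */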
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;
		dma_addr_t start_bus;
		void *start;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer.  We
		 * reuse the page for reassembling the split packet.
		 */

		offset = offsetof(struct ar_buffer, data);
		start = buffer = ab;
		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		dma_free_coherent(ohci->card.device, PAGE_SIZE,
				  start, start_bus);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}
static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs = regs;
	ctx->ohci = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	return 0;
}
static void ar_context_run(struct ar_context *ctx)
{
	struct ar_buffer *ab = ctx->current_buffer;
	dma_addr_t ab_bus;
	size_t offset;

	offset = offsetof(struct ar_buffer, data);
	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}
static struct descriptor *
find_branch_descriptor(struct descriptor *d, int z)
{
	int b, key;

	b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && (b == 3 || key == 2))
		return d;
	else
		return d + z - 1;
}
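/*
 * For a two-descriptor block the branch address lives in the first
 * descriptor when that descriptor either branches unconditionally
 * (b == 3, e.g. an OUTPUT_LAST) or carries immediate data (key == 2,
 * where the second "descriptor" is really the immediate payload).
 * In every other case the branch sits in the last descriptor of the
 * block, d + z - 1.
 */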
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
				address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}
/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int
context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}
static int
context_init(struct context *ctx, struct fw_ohci *ohci,
	     u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;

	return 0;
}
static void
context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
			desc->buffer_bus -
			((void *)&desc->buffer - (void *)desc));
}
/* Must be called with ohci->lock held */
static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}
static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);
	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev = find_branch_descriptor(d, z);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}
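/*
 * Descriptors are 16-byte aligned, so the low four bits of a branch
 * address are free to carry the Z value: the number of descriptors in
 * the block being branched to.  "d_bus | z" above encodes both in one
 * quadlet, and the CONTEXT_WAKE write tells a running context to
 * re-fetch a branch it may already have seen as zero.
 */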
static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		mdelay(1);
	}
	fw_error("Error: DMA context still active (0x%08x)\n", reg);
}
struct driver_data {
	struct fw_packet *packet;
};
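/*
 * driver_data is stashed in the fourth descriptor of each AT block
 * (see at_context_queue_packet(), which always reserves four): the
 * hardware never reads that slot, so it doubles as per-packet driver
 * storage that handle_at_packet() can recover later.
 */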
/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int
at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;
	u32 reg;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
	 */

	header = (__le32 *) &d[1];
	if (packet->header_length > 8) {
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
	} else {
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0]);
		header[2] = cpu_to_le32(packet->header[1]);
		d[0].req_count = cpu_to_le16(12);
	}

	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		payload_bus =
			dma_map_single(ohci->card.device, packet->payload,
				       packet->payload_length, DMA_TO_DEVICE);
		if (dma_mapping_error(ohci->card.device, payload_bus)) {
			packet->ack = RCODE_SEND_ERROR;
			return -1;
		}
		packet->payload_bus = payload_bus;

		d[2].req_count    = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/*
	 * If the controller and packet generations don't match, we need to
	 * bail out and try again.  If IntEvent.busReset is set, the AT context
	 * is halted, so appending to the context and trying to run it is
	 * futile.  Most controllers do the right thing and just flush the AT
	 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
	 * up stalling out.  So we just bail out in software and try again
	 * later, and everyone is happy.
	 * FIXME: Document how the locking works.
	 */
	if (ohci->generation != packet->generation ||
	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
		if (packet->payload_length > 0)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	/* If the context isn't already running, start it up. */
	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
	if ((reg & CONTEXT_RUN) == 0)
		context_run(ctx, 0);

	return 0;
}
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_bus)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event('T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A packet that was flushed should give the same error
		 * as when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/*
		 * Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks.
		 */
		packet->ack = RCODE_NO_ACK;
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}
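/*
 * Lock requests to the serialized bus-management CSRs are handed to
 * the controller itself below: CSRData/CSRCompareData are loaded with
 * the swap and compare values, CSRControl selects the register
 * (0 = BUS_MANAGER_ID, 1 = BANDWIDTH_AVAILABLE, 2/3 = CHANNELS_AVAILABLE
 * hi/lo; the four CSRs are consecutive quadlets, hence the "/ 4") and
 * starts the compare-swap, and its csrDone bit (bit 31) reports
 * completion with the old value left in CSRData.
 */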
static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
 out:
	fw_core_handle_response(&ohci->card, &response);
}
static void
handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}
static void
at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	retval = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (retval < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}
static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_notify("node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		fw_notify("misconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (reg & OHCI1394_SelfIDCount_selfIDError) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = (reg >> 3) & 0x3ff;
	if (self_id_count == 0) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
			fw_notify("inconsistent self IDs\n");
			return;
		}
		ohci->self_id_buffer[j] =
				cond_le32_to_cpu(ohci->self_id_cpu[i]);
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	if (ohci->bus_reset_packet_quirk)
		ohci->request_generation = generation;

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */

	if (ohci->next_config_rom != NULL) {
		if (ohci->next_config_rom != ohci->config_rom) {
			free_rom      = ohci->config_rom;
			free_rom_bus  = ohci->config_rom_bus;
		}
		ohci->config_rom      = ohci->next_config_rom;
		ohci->config_rom_bus  = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
	}

#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (free_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  free_rom, free_rom_bus);

	log_selfids(ohci->node_id, generation,
		    self_id_count, ohci->self_id_buffer);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}
static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event, cycle_time;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	/* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	log_irqs(event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	if (unlikely(event & OHCI1394_regAccessFail))
		fw_error("Register access failure - "
			 "please notify linux1394-devel@lists.sf.net\n");

	if (unlikely(event & OHCI1394_postedWriteErr))
		fw_error("PCI posted write error\n");

	if (unlikely(event & OHCI1394_cycleTooLong)) {
		if (printk_ratelimit())
			fw_notify("isochronous cycle too long\n");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	}

	if (event & OHCI1394_cycle64Seconds) {
		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		if ((cycle_time & 0x80000000) == 0)
			ohci->bus_seconds++;
	}

	return IRQ_HANDLED;
}
static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;

		msleep(1);
	}

	return -EBUSY;
}
static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct pci_dev *dev = to_pci_dev(card->device);
	u32 lps;
	int i;

	if (software_reset(ohci)) {
		fw_error("Failed to reset ohci card.\n");
		return -EBUSY;
	}

	/*
	 * Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  However, with some cards (well, at least
	 * a JMicron PCIe card), we have to try again sometimes.
	 */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);

	for (lps = 0, i = 0; !lps && i < 3; i++) {
		msleep(50);
		lps = reg_read(ohci, OHCI1394_HCControlSet) &
		      OHCI1394_HCControl_LPS;
	}

	if (!lps) {
		fw_error("Failed to set Link Power Status\n");
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_LinkControlClear,
		  OHCI1394_LinkControl_rcvPhyPkt);
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

	ar_context_run(&ohci->ar_request_ctx);
	ar_context_run(&ohci->ar_response_ctx);

	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_selfIDComplete |
		  OHCI1394_RQPkt | OHCI1394_RSPkt |
		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		  OHCI1394_isochRx | OHCI1394_isochTx |
		  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
		  OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
		  OHCI1394_masterIntEnable);
	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);

	/* Activate link_on bit and contender bit in our self ID packets. */
	if (ohci_update_phy_reg(card, 4, 0,
				PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
		return -EIO;

	/*
	 * When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled.  This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in the bus reset tasklet.
	 */

	if (config_rom) {
		ohci->next_config_rom =
			dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
					   &ohci->next_config_rom_bus,
					   GFP_KERNEL);
		if (ohci->next_config_rom == NULL)
			return -ENOMEM;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
	} else {
		/*
		 * In the suspend case, config_rom is NULL, which
		 * means that we just reuse the old config rom.
		 */
		ohci->next_config_rom = ohci->config_rom;
		ohci->next_config_rom_bus = ohci->config_rom_bus;
	}

	ohci->next_header = be32_to_cpu(ohci->next_config_rom[0]);
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  be32_to_cpu(ohci->next_config_rom[2]));
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	if (request_irq(dev->irq, irq_handler,
			IRQF_SHARED, ohci_driver_name, ohci)) {
		fw_error("Failed to allocate shared interrupt %d.\n",
			 dev->irq);
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);
	flush_writes(ohci);

	/*
	 * We are ready to go, initiate bus reset to finish the
	 * initialization.
	 */

	fw_core_initiate_bus_reset(&ohci->card, 1);

	return 0;
}
static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int retval = -EBUSY;
	__be32 *next_config_rom;
	dma_addr_t uninitialized_var(next_config_rom_bus);

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values into these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
				  length * 4);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
		retval = 0;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */
	if (retval == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);
	else
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);

	return retval;
}
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}
static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int retval = -ENOENT;

	tasklet_disable(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	if (packet->payload_bus)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	log_ar_at_event('T', packet->speed, packet->header, 0x20);
	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;
	packet->callback(packet, &ohci->card, packet->ack);
	retval = 0;

 out:
	tasklet_enable(&ctx->tasklet);

	return retval;
}
static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	return 0;
#else
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, retval = 0;

	/*
	 * FIXME:  Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
	 */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		retval = -ESTALE;
		goto out;
	}

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);
	return retval;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
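/*
 * The 64-bit bus time below is assembled from two sources: the irq
 * handler counts full wraps of the cycle timer's 7-bit seconds field
 * in ohci->bus_seconds (cycle64Seconds fires every 64 seconds, when
 * the register's MSB flips, and the counter is bumped only once the
 * bit has just gone to 0), and the raw 32-bit cycle timer register
 * supplies the low half.
 */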
static u64
ohci_get_bus_time(struct fw_card *card)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 cycle_time;
	u64 bus_time;

	cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
	bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;

	return bus_time;
}
static int handle_ir_dualbuffer_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct db_descriptor *db = (struct db_descriptor *) d;
	__le32 *ir_header;
	size_t header_length;
	void *p, *end;
	int i;

	if (db->first_res_count != 0 && db->second_res_count != 0) {
		if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
			/* This descriptor isn't done yet, stop iteration. */
			return 0;
		}
		ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
	}

	header_length = le16_to_cpu(db->first_req_count) -
			le16_to_cpu(db->first_res_count);

	i = ctx->header_length;
	p = db + 1;
	end = p + header_length;
	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
		/*
		 * The iso header is byteswapped to little endian by
		 * the controller, but the remaining header quadlets
		 * are big endian.  We want to present all the headers
		 * as big endian, so we have to swap the first
		 * quadlet.
		 */
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
		i += ctx->base.header_size;
		ctx->excess_bytes +=
			(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
		p += ctx->base.header_size + 4;
	}
	ctx->header_length = i;

	ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
			le16_to_cpu(db->second_res_count);

	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) (db + 1);
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}
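/*
 * handle_ir_packet_per_buffer() below is the fallback receive path for
 * controllers where dual-buffer mode is unavailable or broken
 * (ohci->use_dualbuffer == false): each packet is received through its
 * own small descriptor block, with the isochronous header landing
 * right behind that block (hence p = last + 1), and completion is
 * detected via the status the controller writes back into the
 * packet's final descriptor.
 */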
static int handle_ir_packet_per_buffer(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	__le32 *ir_header;
	void *p;
	int i;

	for (pd = d; pd <= last; pd++) {
		if (pd->transfer_status)
			break;
	}
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	i = ctx->header_length;
	p = last + 1;

	if (ctx->base.header_size > 0 &&
			i + ctx->base.header_size <= PAGE_SIZE) {
		/*
		 * The iso header is byteswapped to little endian by
		 * the controller, but the remaining header quadlets
		 * are big endian.  We want to present all the headers
		 * as big endian, so we have to swap the first quadlet.
		 */
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
		ctx->header_length += ctx->base.header_size;
	}

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) p;
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}
static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
				   0, NULL, ctx->base.callback_data);

	return 1;
}
static struct fw_iso_context *
ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx, *list;
	descriptor_callback_t callback;
	u32 *mask, regs;
	unsigned long flags;
	int index, retval = -ENOMEM;

	if (type == FW_ISO_CONTEXT_TRANSMIT) {
		mask = &ohci->it_context_mask;
		list = ohci->it_context_list;
		callback = handle_it_packet;
	} else {
		mask = &ohci->ir_context_mask;
		list = ohci->ir_context_list;
		if (ohci->use_dualbuffer)
			callback = handle_ir_dualbuffer_packet;
		else
			callback = handle_ir_packet_per_buffer;
	}

	spin_lock_irqsave(&ohci->lock, flags);
	index = ffs(*mask) - 1;
	if (index >= 0)
		*mask &= ~(1 << index);
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(-EBUSY);

	if (type == FW_ISO_CONTEXT_TRANSMIT)
		regs = OHCI1394_IsoXmitContextBase(index);
	else
		regs = OHCI1394_IsoRcvContextBase(index);

	ctx = &list[index];
	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL)
		goto out;

	retval = context_init(&ctx->context, ohci, regs, callback);
	if (retval < 0)
		goto out_with_header;

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irqsave(&ohci->lock, flags);
	*mask |= 1 << index;
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(retval);
}

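/*
 * Arm and start a context.  For IT contexts a 15-bit start cycle is
 * placed in bits 16-30 of the control word; for IR contexts the
 * match register packs tag mask, cycle, sync field and channel.  As
 * a sketch, channel 5, sync 1, all tags and no cycle match would be:
 *
 *	match = (0xf << 28) | (1 << 8) | 5;
 *
 * A negative cycle means "start immediately" and skips cycle match.
 */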
static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control, match;
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
	} else {
		index = ctx - ohci->ir_context_list;
		control = IR_CONTEXT_ISOCH_HEADER;
		if (ohci->use_dualbuffer)
			control |= IR_CONTEXT_DUAL_BUFFER_MODE;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);
	}

	return 0;
}

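/*
 * Stopping is a two-step affair: first mask the context's interrupt
 * so the completion tasklet can no longer be scheduled for it, then
 * have context_stop() halt the DMA engine itself.
 */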
static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}

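/*
 * An IT packet becomes a small descriptor program, roughly:
 *
 *	d[0]	OUTPUT_MORE with the immediate key, req_count 8
 *	d[1]	the two immediate iso-header quadlets themselves
 *	d[2]	optional OUTPUT_MORE for extra user-supplied headers,
 *		which are copied to d[z] and beyond
 *	 ...	one OUTPUT_MORE per payload page crossed
 *	last	OR'ed with OUTPUT_LAST | STATUS | BRANCH_ALWAYS
 *
 * z counts the descriptors the hardware executes; header_z counts
 * the slots that merely hold header payload.
 */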
static int
ohci_queue_iso_transmit(struct fw_iso_context *base,
			struct fw_iso_packet *packet,
			struct fw_iso_buffer *buffer,
			unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate.
	 */

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page = payload_index >> PAGE_SHIFT;
		offset = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}

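/*
 * Dual-buffer receive: each db_descriptor directs the stripped iso
 * headers (plus the appended status word) into a first buffer and
 * the payload into a second.  Payload pages are mapped one page at a
 * time, so a packet whose payload crosses a page boundary is spread
 * over several descriptors and reassembled by the completion handler
 * through ctx->excess_bytes.
 */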
static int
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
				  struct fw_iso_packet *packet,
				  struct fw_iso_buffer *buffer,
				  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct db_descriptor *db = NULL;
	struct descriptor *d;
	struct fw_iso_packet *p;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, length, rest;
	int page, offset, packet_count, header_size;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate.
	 */

	p = packet;
	z = 2;

	/*
	 * The OHCI controller puts the status word in the header
	 * buffer too, so we need 4 extra bytes per packet.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = packet_count * (ctx->base.header_size + 4);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest = p->payload_length;

	/* FIXME: make packet-per-buffer/dual-buffer a context option */
	while (rest > 0) {
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_BRANCH_ALWAYS);
		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
		if (p->skip && rest == p->payload_length) {
			db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
			db->first_req_count = db->first_size;
		} else {
			db->first_req_count = cpu_to_le16(header_size);
		}
		db->first_res_count = db->first_req_count;
		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));

		if (p->skip && rest == p->payload_length)
			length = 4;
		else if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;

		db->second_req_count = cpu_to_le16(length);
		db->second_res_count = db->second_req_count;
		page_bus = page_private(buffer->pages[page]);
		db->second_buffer = cpu_to_le32(page_bus + offset);

		if (p->interrupt && length == rest)
			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		if (offset == 0)
			page++;
	}

	return 0;
}

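/*
 * Packet-per-buffer receive, the fallback used on OHCI 1.0
 * controllers: per packet, one INPUT_MORE descriptor receives the
 * headers and page-sized INPUT_MORE descriptors receive the payload;
 * the final one is turned into INPUT_LAST and carries the status and
 * branch bits.
 */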
static int
ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
					 struct fw_iso_packet *packet,
					 struct fw_iso_buffer *buffer,
					 unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d = NULL, *pd = NULL;
	struct fw_iso_packet *p = packet;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the status word in the
	 * buffer too, so we need 4 extra bytes per packet.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = ctx->base.header_size + 4;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	payload_per_buffer = p->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
					 DESCRIPTOR_INPUT_MORE);
		if (p->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count = cpu_to_le16(header_size);
		d->res_count = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		for (j = 1; j < z; j++) {
			pd = d + j;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (p->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}

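/*
 * All three queue variants manipulate the context's descriptor list,
 * so dispatch happens under ohci->lock.
 */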
static int
ohci_queue_iso(struct fw_iso_context *base,
	       struct fw_iso_packet *packet,
	       struct fw_iso_buffer *buffer,
	       unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
		retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
	else if (ctx->context.ohci->use_dualbuffer)
		retval = ohci_queue_iso_receive_dualbuffer(base, packet,
							   buffer, payload);
	else
		retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
								  buffer,
								  payload);
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return retval;
}

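/* The hooks through which fw-core drives this controller. */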
static const struct fw_card_driver ohci_driver = {
	.enable = ohci_enable,
	.update_phy_reg = ohci_update_phy_reg,
	.set_config_rom = ohci_set_config_rom,
	.send_request = ohci_send_request,
	.send_response = ohci_send_response,
	.cancel_packet = ohci_cancel_packet,
	.enable_phys_dma = ohci_enable_phys_dma,
	.get_bus_time = ohci_get_bus_time,

	.allocate_iso_context = ohci_allocate_iso_context,
	.free_iso_context = ohci_free_iso_context,
	.queue_iso = ohci_queue_iso,
	.start_iso = ohci_start_iso,
	.stop_iso = ohci_stop_iso,
};

#ifdef CONFIG_PPC_PMAC
static void ohci_pmac_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void ohci_pmac_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
#define ohci_pmac_on(dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */

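/*
 * Probe: power up the PHY on PowerMacs, map the MMIO registers, size
 * the iso context lists from the controller's interrupt mask
 * registers, allocate the self-ID DMA buffer and finally hand the
 * card to fw-core.
 */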
static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int err;
	size_t size;

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	ohci_pmac_on(dev);

	err = pci_enable_device(dev);
	if (err) {
		fw_error("Failed to enable OHCI hardware\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		fw_error("MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;

/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
#if !defined(CONFIG_X86_32)
	/* dual-buffer mode is broken with descriptor addresses above 2G */
	if (dev->vendor == PCI_VENDOR_ID_TI &&
	    dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
		ohci->use_dualbuffer = false;
#endif

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
	ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
			     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
	ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	context_init(&ohci->at_request_ctx, ohci,
		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

	context_init(&ohci->at_response_ctx, ohci,
		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

	/*
	 * Discover how many contexts the controller implements: writing
	 * ~0 to a mask-set register and reading it back yields one bit
	 * per implemented context.  The IT mask comes from the IsoXmit
	 * registers and the IR mask from the IsoRecv registers.
	 */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err < 0)
		goto fail_self_id;

	fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
		  dev_name(&dev->dev), version >> 16, version & 0xff);
	return 0;

 fail_self_id:
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
 fail_contexts:
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	context_release(&ohci->at_response_ctx);
	context_release(&ohci->at_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	ar_context_release(&ohci->ar_request_ctx);
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(ohci);
	ohci_pmac_off(dev);
 fail:
	if (err == -ENOMEM)
		fw_error("Out of memory\n");

	return err;
}

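/*
 * Teardown mirrors pci_probe in reverse order.  Masking and flushing
 * all interrupts first guarantees no handler is running while the
 * card is dismantled.
 */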
static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);

	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->next_config_rom,
				  ohci->next_config_rom_bus);
	if (ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
	ar_context_release(&ohci->ar_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	context_release(&ohci->at_request_ctx);
	context_release(&ohci->at_response_ctx);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(ohci);
	ohci_pmac_off(dev);

	fw_notify("Removed fw-ohci device.\n");
}

#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	err = pci_save_state(dev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);
	ohci_pmac_off(dev);

	return 0;
}

static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	ohci_pmac_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}

	return ohci_enable(&ohci->card, NULL, 0);
}
#endif

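/*
 * Match on the PCI class (serial bus controller, FireWire, OHCI
 * programming interface) instead of vendor/device IDs, so any
 * spec-compliant controller binds to this driver.
 */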
static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name = ohci_driver_name,
	.id_table = pci_table,
	.probe = pci_probe,
	.remove = pci_remove,
#ifdef CONFIG_PM
	.resume = pci_resume,
	.suspend = pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);