/*
 * IEEE 1394 for Linux
 *
 * Core support: hpsb_packet management, packet handling and forwarding to
 * highlevel or lowlevel code
 *
 * Copyright (C) 1999, 2000 Andreas E. Bombe
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <asm/semaphore.h>

#include "ieee1394_types.h"
#include "ieee1394.h"
#include "hosts.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ieee1394_transactions.h"
#include "csr.h"
#include "guid.h"


atomic_t hpsb_generation = ATOMIC_INIT(0);

static void dump_packet(const char *text, quadlet_t *data, int size)
{
        int i;

        size /= 4;
        size = (size > 4 ? 4 : size);

        printk(KERN_DEBUG "ieee1394: %s", text);
        for (i = 0; i < size; i++) {
                printk(" %8.8x", data[i]);
        }
        printk("\n");
}

/**
 * alloc_hpsb_packet - allocate new packet structure
 * @data_size: size of the data block to be allocated
 *
 * This function allocates, initializes and returns a new &struct hpsb_packet.
 * It can be used in interrupt context.  A header block is always included and
 * is big enough to contain all possible 1394 headers.  The data block is only
 * allocated when @data_size is not zero.
 *
 * For packets for which responses will be received, @data_size has to be big
 * enough to contain the response's data block, since no further allocation
 * occurs at response matching time.
 *
 * The packet's generation value will be set to the current generation number
 * for ease of use.  Remember to overwrite it with your own recorded generation
 * number if you cannot be sure that your code will not race with a bus reset.
 *
 * Return value: A pointer to a &struct hpsb_packet or NULL on allocation
 * failure.
 */
struct hpsb_packet *alloc_hpsb_packet(size_t data_size)
{
        struct hpsb_packet *packet = NULL;
        void *header = NULL, *data = NULL;
        int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

        packet = kmalloc(sizeof(struct hpsb_packet), kmflags);
        header = kmalloc(5 * 4, kmflags);
        if (header == NULL || packet == NULL) {
                kfree(header);
                kfree(packet);
                return NULL;
        }

        memset(packet, 0, sizeof(struct hpsb_packet));
        packet->header = header;

        if (data_size) {
                data = kmalloc(data_size + 8, kmflags);
                if (data == NULL) {
                        kfree(header);
                        kfree(packet);
                        return NULL;
                }

                packet->data = data;
                packet->data_size = data_size;
        }

        INIT_LIST_HEAD(&packet->list);
        sema_init(&packet->state_change, 0);
        packet->state = unused;
        packet->generation = get_hpsb_generation();
        packet->data_be = 1;

        return packet;
}
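
#if 0
/*
 * Illustrative sketch, not part of the original driver: allocating a packet
 * whose data block must already be large enough for the expected block read
 * response, as the kerneldoc above requires (nothing is reallocated when the
 * response is matched).  RESPONSE_SIZE and example_alloc() are hypothetical
 * names used only for this example.
 */
#define RESPONSE_SIZE 16

static struct hpsb_packet *example_alloc(void)
{
        struct hpsb_packet *packet;

        /* safe in interrupt context; the header block is always allocated */
        packet = alloc_hpsb_packet(RESPONSE_SIZE);
        if (packet == NULL) {
                return NULL;
        }

        /* ... fill in the request header before sending ... */

        /* whoever owns the packet last releases it with free_hpsb_packet() */
        return packet;
}
#endif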

/**
 * free_hpsb_packet - free packet and data associated with it
 * @packet: packet to free (is NULL safe)
 *
 * This function will free packet->data, packet->header and finally the packet
 * itself.
 */
void free_hpsb_packet(struct hpsb_packet *packet)
{
        if (packet == NULL) {
                return;
        }

        kfree(packet->data);
        kfree(packet->header);
        kfree(packet);
}


void reset_host_bus(struct hpsb_host *host)
{
        if (!host->initialized) {
                return;
        }

        hpsb_bus_reset(host);
        host->template->devctl(host, RESET_BUS, 0);
}


void hpsb_bus_reset(struct hpsb_host *host)
{
        if (!host->in_bus_reset) {
                abort_requests(host);
                host->in_bus_reset = 1;
                host->irm_id = -1;
                host->busmgr_id = -1;
                host->node_count = 0;
                host->selfid_count = 0;
        } else {
                HPSB_NOTICE(__FUNCTION__
                            " called while bus reset already in progress");
        }
}

/*
 * Verify num_of_selfids SelfIDs and return number of nodes.  Return zero in
 * case verification failed.
 */
static int check_selfids(struct hpsb_host *host, unsigned int num_of_selfids)
{
        int nodeid = -1;
        int rest_of_selfids = num_of_selfids;
        struct selfid *sid = (struct selfid *)host->topology_map;
        struct ext_selfid *esid;
        int esid_seq = 23;

        while (rest_of_selfids--) {
                if (!sid->extended) {
                        nodeid++;
                        esid_seq = 0;

                        if (sid->phy_id != nodeid) {
                                HPSB_INFO("SelfIDs failed monotony check with "
                                          "%d", sid->phy_id);
                                return 0;
                        }

                        if (sid->contender && sid->link_active) {
                                host->irm_id = LOCAL_BUS | sid->phy_id;
                        }
                } else {
                        esid = (struct ext_selfid *)sid;

                        if ((esid->phy_id != nodeid)
                            || (esid->seq_nr != esid_seq)) {
                                HPSB_INFO("SelfIDs failed monotony check with "
                                          "%d/%d", esid->phy_id, esid->seq_nr);
                                return 0;
                        }
                        esid_seq++;
                }
                sid++;
        }

        esid = (struct ext_selfid *)(sid - 1);
        while (esid->extended) {
                if ((esid->porta == 0x2) || (esid->portb == 0x2)
                    || (esid->portc == 0x2) || (esid->portd == 0x2)
                    || (esid->porte == 0x2) || (esid->portf == 0x2)
                    || (esid->portg == 0x2) || (esid->porth == 0x2)) {
                        HPSB_INFO("SelfIDs failed root check on "
                                  "extended SelfID");
                        return 0;
                }
                esid--;
        }

        sid = (struct selfid *)esid;
        if ((sid->port0 == 0x2) || (sid->port1 == 0x2) || (sid->port2 == 0x2)) {
                HPSB_INFO("SelfIDs failed root check");
                return 0;
        }

        return nodeid + 1;
}
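
/*
 * Note added for clarity, based on the IEEE 1394 self-ID packet format: the
 * two-bit port fields tested above and below encode 0x0 = port not present,
 * 0x1 = not connected, 0x2 = connected to the parent node and 0x3 = connected
 * to a child node.  That is why check_selfids() rejects a last node that still
 * reports a parent connection (it cannot be the root), and why
 * build_speed_map() counts ports with value 0x3 as direct children.
 */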

static void build_speed_map(struct hpsb_host *host, int nodecount)
{
        char speedcap[nodecount];
        char cldcnt[nodecount];
        u8 *map = host->speed_map;
        struct selfid *sid;
        struct ext_selfid *esid;
        int i, j, n;

        for (i = 0; i < (nodecount * 64); i += 64) {
                for (j = 0; j < nodecount; j++) {
                        map[i+j] = SPEED_400;
                }
        }

        for (i = 0; i < nodecount; i++) {
                cldcnt[i] = 0;
        }

        /* find direct children count and speed */
        for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
                     n = nodecount - 1;
             (void *)sid >= (void *)host->topology_map; sid--) {
                if (sid->extended) {
                        esid = (struct ext_selfid *)sid;

                        if (esid->porta == 0x3) cldcnt[n]++;
                        if (esid->portb == 0x3) cldcnt[n]++;
                        if (esid->portc == 0x3) cldcnt[n]++;
                        if (esid->portd == 0x3) cldcnt[n]++;
                        if (esid->porte == 0x3) cldcnt[n]++;
                        if (esid->portf == 0x3) cldcnt[n]++;
                        if (esid->portg == 0x3) cldcnt[n]++;
                        if (esid->porth == 0x3) cldcnt[n]++;
                } else {
                        if (sid->port0 == 0x3) cldcnt[n]++;
                        if (sid->port1 == 0x3) cldcnt[n]++;
                        if (sid->port2 == 0x3) cldcnt[n]++;

                        speedcap[n] = sid->speed;
                        n--;
                }
        }

        /* set self mapping */
        for (i = nodecount - 1; i; i--) {
                map[64*i + i] = speedcap[i];
        }

        /* fix up direct children count to total children count;
         * also fix up speedcaps for sibling and parent communication */
        for (i = 1; i < nodecount; i++) {
                for (j = cldcnt[i], n = i - 1; j > 0; j--) {
                        cldcnt[i] += cldcnt[n];
                        speedcap[n] = MIN(speedcap[n], speedcap[i]);
                        n -= cldcnt[n] + 1;
                }
        }

        for (n = 0; n < nodecount; n++) {
                for (i = n - cldcnt[n]; i <= n; i++) {
                        for (j = 0; j < (n - cldcnt[n]); j++) {
                                map[j*64 + i] = map[i*64 + j] =
                                        MIN(map[i*64 + j], speedcap[n]);
                        }
                        for (j = n + 1; j < nodecount; j++) {
                                map[j*64 + i] = map[i*64 + j] =
                                        MIN(map[i*64 + j], speedcap[n]);
                        }
                }
        }
}
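
#if 0
/*
 * Illustrative sketch only, not part of the original driver: the speed map
 * built above is a 64x64 matrix stored row by row in host->speed_map, so the
 * highest speed usable between nodes a and b is looked up as map[a*64 + b]
 * (compare the lookup in hpsb_send_packet() below).  example_speed_between()
 * is a hypothetical helper name used only for this example.
 */
static u8 example_speed_between(struct hpsb_host *host, int a, int b)
{
        return host->speed_map[(a & NODE_MASK) * 64 + (b & NODE_MASK)];
}
#endif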

void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
{
        if (host->in_bus_reset) {
                HPSB_DEBUG("including selfid 0x%x", sid);
                host->topology_map[host->selfid_count++] = sid;
        } else {
                /* FIXME - info on which host */
                HPSB_NOTICE("spurious selfid packet (0x%8.8x) received", sid);
        }
}

void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
{
        host->node_id = 0xffc0 | phyid;
        host->in_bus_reset = 0;
        host->is_root = isroot;

        host->node_count = check_selfids(host, host->selfid_count);
        if (!host->node_count) {
                if (host->reset_retries++ < 20) {
                        /* selfid stage did not complete without error */
                        HPSB_NOTICE("error in SelfID stage - resetting");
                        reset_host_bus(host);
                        return;
                } else {
                        HPSB_NOTICE("stopping out-of-control reset loop");
                        HPSB_NOTICE("warning - topology map and speed map will "
                                    "therefore not be valid");
                }
        } else {
                build_speed_map(host, host->node_count);
        }

        /* irm_id is kept up to date by check_selfids() */
        if (host->irm_id == host->node_id) {
                host->is_irm = 1;
                host->is_busmgr = 1;
                host->busmgr_id = host->node_id;
                host->csr.bus_manager_id = host->node_id;
        }

        host->reset_retries = 0;
        inc_hpsb_generation();
        highlevel_host_reset(host);
}

void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
                      int ackcode)
{
        unsigned long flags;

        packet->ack_code = ackcode;

        if (packet->no_waiter) {
                /* must not have a tlabel allocated */
                free_hpsb_packet(packet);
                return;
        }

        if (ackcode != ACK_PENDING || !packet->expect_response) {
                packet->state = complete;
                up(&packet->state_change);
                up(&packet->state_change);
                run_task_queue(&packet->complete_tq);
                return;
        }

        packet->state = pending;
        packet->sendtime = jiffies;

        spin_lock_irqsave(&host->pending_pkt_lock, flags);
        list_add_tail(&packet->list, &host->pending_packets);
        spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        up(&packet->state_change);
        queue_task(&host->timeout_tq, &tq_timer);
}

/**
 * hpsb_send_packet - transmit a packet on the bus
 * @packet: packet to send
 *
 * The packet is sent through the host specified in the packet->host field.
 * Before sending, the packet's transmit speed is automatically determined
 * using the local speed map.
 *
 * Possibilities for failure are that the host is not initialized, is in bus
 * reset, the packet's generation number doesn't match the current generation
 * number, or the host reports a transmit error.
 *
 * Return value: False (0) on failure, true (1) otherwise.
 */
int hpsb_send_packet(struct hpsb_packet *packet)
{
        struct hpsb_host *host = packet->host;

        if (!host->initialized || host->in_bus_reset
            || (packet->generation != get_hpsb_generation())) {
                return 0;
        }

        packet->state = queued;
        packet->speed_code = host->speed_map[(host->node_id & NODE_MASK) * 64
                                             + (packet->node_id & NODE_MASK)];

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
        switch (packet->speed_code) {
        case 2:
                dump_packet("send packet 400:", packet->header,
                            packet->header_size);
                break;
        case 1:
                dump_packet("send packet 200:", packet->header,
                            packet->header_size);
                break;
        default:
                dump_packet("send packet 100:", packet->header,
                            packet->header_size);
        }
#endif

        return host->template->transmit_packet(host, packet);
}
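
#if 0
/*
 * Illustrative sketch only, not part of the original driver: a minimal
 * caller-side pattern for hpsb_send_packet().  example_send() and dest_id are
 * hypothetical; real request packets are normally built through the helpers
 * declared in ieee1394_transactions.h, which also take care of tlabels.
 */
static int example_send(struct hpsb_host *host, int dest_id,
                        unsigned int generation)
{
        struct hpsb_packet *packet = alloc_hpsb_packet(0);

        if (packet == NULL) {
                return 0;
        }

        packet->host = host;
        packet->node_id = dest_id;
        /* use the generation recorded before building the request, so that a
         * bus reset in between makes hpsb_send_packet() refuse the send */
        packet->generation = generation;

        /* ... fill in the request header ... */

        if (!hpsb_send_packet(packet)) {
                free_hpsb_packet(packet);
                return 0;
        }

        return 1;
}
#endif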

static void send_packet_nocare(struct hpsb_packet *packet)
{
        if (!hpsb_send_packet(packet)) {
                free_hpsb_packet(packet);
        }
}


void handle_packet_response(struct hpsb_host *host, int tcode, quadlet_t *data,
                            size_t size)
{
        struct hpsb_packet *packet = NULL;
        struct list_head *lh;
        int tcode_match = 0;
        int tlabel;
        unsigned long flags;

        tlabel = (data[0] >> 10) & 0x3f;

        spin_lock_irqsave(&host->pending_pkt_lock, flags);

        lh = host->pending_packets.next;
        while (lh != &host->pending_packets) {
                packet = list_entry(lh, struct hpsb_packet, list);
                if ((packet->tlabel == tlabel)
                    && (packet->node_id == (data[1] >> 16))) {
                        break;
                }
                lh = lh->next;
        }

        if (lh == &host->pending_packets) {
                HPSB_INFO("unsolicited response packet received - np");
                dump_packet("contents:", data, 16);
                spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
                return;
        }

        switch (packet->tcode) {
        case TCODE_WRITEQ:
        case TCODE_WRITEB:
                if (tcode == TCODE_WRITE_RESPONSE) tcode_match = 1;
                break;
        case TCODE_READQ:
                if (tcode == TCODE_READQ_RESPONSE) tcode_match = 1;
                break;
        case TCODE_READB:
                if (tcode == TCODE_READB_RESPONSE) tcode_match = 1;
                break;
        case TCODE_LOCK_REQUEST:
                if (tcode == TCODE_LOCK_RESPONSE) tcode_match = 1;
                break;
        }

        if (!tcode_match || (packet->tlabel != tlabel)
            || (packet->node_id != (data[1] >> 16))) {
                HPSB_INFO("unsolicited response packet received");
                dump_packet("contents:", data, 16);

                spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
                return;
        }

        list_del(&packet->list);

        spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        /* FIXME - update size fields? */
        switch (tcode) {
        case TCODE_WRITE_RESPONSE:
                memcpy(packet->header, data, 12);
                break;
        case TCODE_READQ_RESPONSE:
                memcpy(packet->header, data, 16);
                break;
        case TCODE_READB_RESPONSE:
                memcpy(packet->header, data, 16);
                memcpy(packet->data, data + 4, size - 16);
                break;
        case TCODE_LOCK_RESPONSE:
                memcpy(packet->header, data, 16);
                memcpy(packet->data, data + 4, (size - 16) > 8 ? 8 : size - 16);
                break;
        }

        packet->state = complete;
        up(&packet->state_change);
        run_task_queue(&packet->complete_tq);
}

struct hpsb_packet *create_reply_packet(struct hpsb_host *host, quadlet_t *data,
                                        size_t dsize)
{
        struct hpsb_packet *p;

        dsize += (dsize % 4 ? 4 - (dsize % 4) : 0);

        p = alloc_hpsb_packet(dsize);
        if (p == NULL) {
                /* FIXME - send data_error response */
                return NULL;
        }

        p->type = async;
        p->state = unused;
        p->host = host;
        p->node_id = data[1] >> 16;
        p->tlabel = (data[0] >> 10) & 0x3f;
        p->no_waiter = 1;

        if (dsize % 4) {
                p->data[dsize / 4] = 0;
        }

        return p;
}


#define PREP_REPLY_PACKET(length) \
                packet = create_reply_packet(host, data, length); \
                if (packet == NULL) break
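
/*
 * Note added for clarity: PREP_REPLY_PACKET() deliberately ends in a bare
 * `break', so it may only be used directly inside the tcode switch of
 * handle_incoming_packet() below; if create_reply_packet() fails, the request
 * is silently dropped instead of being answered.
 */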

void handle_incoming_packet(struct hpsb_host *host, int tcode, quadlet_t *data,
                            size_t size, int write_acked)
{
        struct hpsb_packet *packet;
        int length, rcode, extcode;
        int source = data[1] >> 16;
        u64 addr;

        /* big FIXME - no error checking is done for an out of bounds length */

        switch (tcode) {
        case TCODE_WRITEQ:
                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                rcode = highlevel_write(host, source, data+3, addr, 4);

                if (!write_acked
                    && ((data[0] >> 16) & NODE_MASK) != NODE_MASK) {
                        /* not a broadcast write, reply */
                        PREP_REPLY_PACKET(0);
                        fill_async_write_resp(packet, rcode);
                        send_packet_nocare(packet);
                }
                break;

        case TCODE_WRITEB:
                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                rcode = highlevel_write(host, source, data+4, addr,
                                        data[3]>>16);

                if (!write_acked
                    && ((data[0] >> 16) & NODE_MASK) != NODE_MASK) {
                        /* not a broadcast write, reply */
                        PREP_REPLY_PACKET(0);
                        fill_async_write_resp(packet, rcode);
                        send_packet_nocare(packet);
                }
                break;

        case TCODE_READQ:
                PREP_REPLY_PACKET(0);

                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                rcode = highlevel_read(host, source, data, addr, 4);
                fill_async_readquad_resp(packet, rcode, *data);
                send_packet_nocare(packet);
                break;

        case TCODE_READB:
                length = data[3] >> 16;
                PREP_REPLY_PACKET(length);

                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
                rcode = highlevel_read(host, source, packet->data, addr,
                                       length);
                fill_async_readblock_resp(packet, rcode, length);
                send_packet_nocare(packet);
                break;

        case TCODE_LOCK_REQUEST:
                length = data[3] >> 16;
                extcode = data[3] & 0xffff;
                addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];

                PREP_REPLY_PACKET(8);

                if ((extcode == 0) || (extcode >= 7)) {
                        /* let switch default handle error */
                        length = 0;
                }

                switch (length) {
                case 4:
                        rcode = highlevel_lock(host, source, packet->data, addr,
                                               data[4], 0, extcode);
                        fill_async_lock_resp(packet, rcode, extcode, 4);
                        break;
                case 8:
                        if ((extcode != EXTCODE_FETCH_ADD)
                            && (extcode != EXTCODE_LITTLE_ADD)) {
                                rcode = highlevel_lock(host, source,
                                                       packet->data, addr,
                                                       data[5], data[4],
                                                       extcode);
                                fill_async_lock_resp(packet, rcode, extcode, 4);
                        } else {
                                rcode = highlevel_lock64(host, source,
                                                (octlet_t *)packet->data, addr,
                                                *(octlet_t *)(data + 4), 0ULL,
                                                extcode);
                                fill_async_lock_resp(packet, rcode, extcode, 8);
                        }
                        break;
                case 16:
                        rcode = highlevel_lock64(host, source,
                                                 (octlet_t *)packet->data, addr,
                                                 *(octlet_t *)(data + 6),
                                                 *(octlet_t *)(data + 4),
                                                 extcode);
                        fill_async_lock_resp(packet, rcode, extcode, 8);
                        break;
                default:
                        fill_async_lock_resp(packet, RCODE_TYPE_ERROR,
                                             extcode, 0);
                }

                send_packet_nocare(packet);
                break;
        }

}
#undef PREP_REPLY_PACKET

void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
                          int write_acked)
{
        int tcode;

        if (host->in_bus_reset) {
                HPSB_INFO("received packet during reset; ignoring");
                return;
        }

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
        dump_packet("received packet:", data, size);
#endif

        tcode = (data[0] >> 4) & 0xf;

        switch (tcode) {
        case TCODE_WRITE_RESPONSE:
        case TCODE_READQ_RESPONSE:
        case TCODE_READB_RESPONSE:
        case TCODE_LOCK_RESPONSE:
                handle_packet_response(host, tcode, data, size);
                break;

        case TCODE_WRITEQ:
        case TCODE_WRITEB:
        case TCODE_READQ:
        case TCODE_READB:
        case TCODE_LOCK_REQUEST:
                handle_incoming_packet(host, tcode, data, size, write_acked);
                break;

        case TCODE_ISO_DATA:
                highlevel_iso_receive(host, data, size);
                break;

        case TCODE_CYCLE_START:
                /* simply ignore this packet if it is passed on */
                break;

        default:
                HPSB_NOTICE("received packet with bogus transaction code %d",
                            tcode);
                break;
        }
}

void abort_requests(struct hpsb_host *host)
{
        unsigned long flags;
        struct hpsb_packet *packet;
        struct list_head *lh;
        LIST_HEAD(llist);

        host->template->devctl(host, CANCEL_REQUESTS, 0);

        spin_lock_irqsave(&host->pending_pkt_lock, flags);
        list_splice(&host->pending_packets, &llist);
        INIT_LIST_HEAD(&host->pending_packets);
        spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        lh = llist.next;

        while (lh != &llist) {
                packet = list_entry(lh, struct hpsb_packet, list);
                lh = lh->next;
                packet->state = complete;
                packet->ack_code = ACKX_ABORTED;
                up(&packet->state_change);
                run_task_queue(&packet->complete_tq);
        }
}

void abort_timedouts(struct hpsb_host *host)
{
        unsigned long flags;
        struct hpsb_packet *packet;
        unsigned long expire;
        struct list_head *lh;
        LIST_HEAD(expiredlist);

        spin_lock_irqsave(&host->csr.lock, flags);
        expire = (host->csr.split_timeout_hi * 8000
                  + (host->csr.split_timeout_lo >> 19)) * HZ / 8000;
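        /* Explanatory note (an assumption about the CSR SPLIT_TIMEOUT layout):
         * split_timeout_hi is taken to hold whole seconds and bits 31..19 of
         * split_timeout_lo a count of 1/8000 s (125 usec) cycles, so the sum
         * is a cycle count and (sum * HZ / 8000) converts it to jiffies. */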
        /* Avoid shortening of timeout due to rounding errors: */
        expire++;
        spin_unlock_irqrestore(&host->csr.lock, flags);


        spin_lock_irqsave(&host->pending_pkt_lock, flags);
        lh = host->pending_packets.next;

        while (lh != &host->pending_packets) {
                packet = list_entry(lh, struct hpsb_packet, list);
                lh = lh->next;
                if (time_before(packet->sendtime + expire, jiffies)) {
                        list_del(&packet->list);
                        list_add(&packet->list, &expiredlist);
                }
        }

        if (!list_empty(&host->pending_packets)) {
                queue_task(&host->timeout_tq, &tq_timer);
        }
        spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        lh = expiredlist.next;
        while (lh != &expiredlist) {
                packet = list_entry(lh, struct hpsb_packet, list);
                lh = lh->next;
                packet->state = complete;
                packet->ack_code = ACKX_TIMEOUT;
                up(&packet->state_change);
                run_task_queue(&packet->complete_tq);
        }
}

#ifndef MODULE

void __init ieee1394_init(void)
{
        register_builtin_lowlevels();
        init_hpsb_highlevel();
        init_csr();
        init_ieee1394_guid();
}

#else

int init_module(void)
{
        init_hpsb_highlevel();
        init_csr();
        init_ieee1394_guid();

        return 0;
}

#endif