4 * Core support: hpsb_packet management, packet handling and forwarding to
5 * highlevel or lowlevel code
7 * Copyright (C) 1999, 2000 Andreas E. Bombe
9 * This code is licensed under the GPL. See the file COPYING in the root
10 * directory of the kernel sources for details.
13 #include <linux/config.h>
14 #include <linux/kernel.h>
15 #include <linux/list.h>
16 #include <linux/string.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <linux/interrupt.h>
20 #include <asm/bitops.h>
21 #include <asm/byteorder.h>
22 #include <asm/semaphore.h>
24 #include "ieee1394_types.h"
27 #include "ieee1394_core.h"
28 #include "highlevel.h"
29 #include "ieee1394_transactions.h"
/* Bus generation counter.  Read via get_hpsb_generation() (e.g. in
 * hpsb_send_packet() below) and bumped via inc_hpsb_generation() after a
 * completed bus reset, so transactions started before the reset can be
 * detected as stale. */
34 atomic_t hpsb_generation
= ATOMIC_INIT(0);
/*
 * dump_packet - debug helper: log a prefixed hex dump of a packet buffer.
 * @text: prefix for the log line
 * @data: quadlet buffer to dump
 * @size: element count; clamped so at most the first 4 quadlets print
 *
 * NOTE(review): some lines (declaration of 'i', trailing newline printk,
 * closing braces) are not visible in this chunk of the file.
 */
37 static void dump_packet(const char *text
, quadlet_t
*data
, int size
)
/* never print more than four quadlets */
42 size
= (size
> 4 ? 4 : size
);
44 printk(KERN_DEBUG
"ieee1394: %s", text
);
45 for (i
= 0; i
< size
; i
++) {
46 printk(" %8.8x", data
[i
]);
53 * alloc_hpsb_packet - allocate new packet structure
54 * @data_size: size of the data block to be allocated
56 * This function allocates, initializes and returns a new &struct hpsb_packet.
57 * It can be used in interrupt context. A header block is always included, its
58 * size is big enough to contain all possible 1394 headers. The data block is
59 * only allocated when @data_size is not zero.
61 * For packets for which responses will be received the @data_size has to be big
62 * enough to contain the response's data block since no further allocation
63 * occurs at response matching time.
65 * The packet's generation value will be set to the current generation number
66 * for ease of use. Remember to overwrite it with your own recorded generation
67 * number if you cannot be sure that your code will not race with a bus reset.
69 * Return value: A pointer to a &struct hpsb_packet or NULL on allocation
/*
 * Allocate and initialize a new hpsb_packet (see kerneldoc above).
 * Safe in interrupt context: GFP_ATOMIC is selected when in_interrupt().
 * NOTE(review): the error-unwind path, the data-allocation guard and the
 * return statement are elided from this view of the file.
 */
72 struct hpsb_packet
*alloc_hpsb_packet(size_t data_size
)
74 struct hpsb_packet
*packet
= NULL
;
75 void *header
= NULL
, *data
= NULL
;
/* pick allocation flags matching the calling context */
76 int kmflags
= in_interrupt() ? GFP_ATOMIC
: GFP_KERNEL
;
78 packet
= kmalloc(sizeof(struct hpsb_packet
), kmflags
);
/* 5 quadlets (20 bytes): big enough for any 1394 header per the
 * kerneldoc above */
79 header
= kmalloc(5 * 4, kmflags
);
80 if (header
== NULL
|| packet
== NULL
) {
86 memset(packet
, 0, sizeof(struct hpsb_packet
));
87 packet
->header
= header
;
/* 8 bytes of slack beyond data_size — reason not visible here;
 * create_reply_packet() below relies on writing one quadlet past the
 * rounded payload */
90 data
= kmalloc(data_size
+ 8, kmflags
);
98 packet
->data_size
= data_size
;
101 INIT_LIST_HEAD(&packet
->list
);
/* semaphore starts at 0: waiters block until hpsb_packet_sent()/
 * completion code up()s it */
102 sema_init(&packet
->state_change
, 0);
103 packet
->state
= unused
;
/* caller may overwrite this if racing with a bus reset (see kerneldoc) */
104 packet
->generation
= get_hpsb_generation();
112 * free_hpsb_packet - free packet and data associated with it
113 * @packet: packet to free (is NULL safe)
115 * This function will free packet->data, packet->header and finally the packet
/*
 * Free a packet allocated by alloc_hpsb_packet() (NULL-safe, per the
 * kerneldoc above).  The header block was allocated separately, so it is
 * freed separately; the data/packet kfree lines are elided from this view.
 */
118 void free_hpsb_packet(struct hpsb_packet
*packet
)
120 if (packet
== NULL
) {
125 kfree(packet
->header
);
/*
 * hpsb_reset_bus - request a bus reset on @host.
 * Refuses when the host is not initialized; if the software reset
 * bookkeeping (hpsb_bus_reset()) reports no reset already in progress,
 * the lowlevel driver is told to actually reset the bus.
 * NOTE(review): return statements are elided from this view.
 */
130 int hpsb_reset_bus(struct hpsb_host
*host
)
132 if (!host
->initialized
) {
/* hpsb_bus_reset() returns nonzero when a reset is already pending */
136 if (!hpsb_bus_reset(host
)) {
137 host
->template->devctl(host
, RESET_BUS
, 0);
/*
 * hpsb_bus_reset - software-side bookkeeping for a starting bus reset.
 * Guards against re-entry while a reset is in progress, aborts all
 * pending requests and clears per-bus state (bus manager, node and
 * self-ID counts) for the new generation.
 */
145 int hpsb_bus_reset(struct hpsb_host
*host
)
147 if (host
->in_bus_reset
) {
148 HPSB_NOTICE(__FUNCTION__
149 " called while bus reset already in progress");
/* complete all outstanding packets with ACKX_ABORTED */
153 abort_requests(host
);
154 host
->in_bus_reset
= 1;
/* -1 = no bus manager known yet for the new generation */
156 host
->busmgr_id
= -1;
157 host
->node_count
= 0;
158 host
->selfid_count
= 0;
165 * Verify num_of_selfids SelfIDs and return number of nodes. Return zero in
166 * case verification failed.
/*
 * check_selfids - validate the received SelfID stream (see comment above:
 * returns the node count, or 0 on verification failure).
 * Walks host->topology_map checking that physical IDs increase
 * monotonically, records the IRM candidate, verifies extended-SelfID
 * sequence numbers, and finally checks that the last node is the root
 * (no port may report code 0x2).
 * NOTE(review): the declarations/updates of 'nodeid' and 'esid_seq' and
 * several branches are elided from this view.
 */
168 static int check_selfids(struct hpsb_host
*host
, unsigned int num_of_selfids
)
171 int rest_of_selfids
= num_of_selfids
;
/* SelfIDs are stored consecutively in the topology map */
172 struct selfid
*sid
= (struct selfid
*)host
->topology_map
;
173 struct ext_selfid
*esid
;
176 while (rest_of_selfids
--) {
177 if (!sid
->extended
) {
/* phy_ids must arrive in monotonically increasing order */
181 if (sid
->phy_id
!= nodeid
) {
182 HPSB_INFO("SelfIDs failed monotony check with "
/* a linked-on contender is an isochronous resource manager
 * candidate; remember the highest one seen */
187 if (sid
->contender
&& sid
->link_active
) {
188 host
->irm_id
= LOCAL_BUS
| sid
->phy_id
;
191 esid
= (struct ext_selfid
*)sid
;
/* extended SelfIDs must keep the same phy_id and consecutive
 * sequence numbers */
193 if ((esid
->phy_id
!= nodeid
)
194 || (esid
->seq_nr
!= esid_seq
)) {
195 HPSB_INFO("SelfIDs failed monotony check with "
196 "%d/%d", esid
->phy_id
, esid
->seq_nr
);
/* step back over any extended SelfIDs of the last node to reach
 * its base SelfID for the root check */
204 esid
= (struct ext_selfid
*)(sid
- 1);
205 while (esid
->extended
) {
/* port code 0x2 means "connected to parent" — presumably per the
 * IEEE 1394 SelfID format; the root node must have no parent */
206 if ((esid
->porta
== 0x2) || (esid
->portb
== 0x2)
207 || (esid
->portc
== 0x2) || (esid
->portd
== 0x2)
208 || (esid
->porte
== 0x2) || (esid
->portf
== 0x2)
209 || (esid
->portg
== 0x2) || (esid
->porth
== 0x2)) {
210 HPSB_INFO("SelfIDs failed root check on "
217 sid
= (struct selfid
*)esid
;
218 if ((sid
->port0
== 0x2) || (sid
->port1
== 0x2) || (sid
->port2
== 0x2)) {
219 HPSB_INFO("SelfIDs failed root check");
/*
 * build_speed_map - fill host->speed_map, a 64x64 matrix (indexed
 * node*64+node, as the i*64+j accesses below show) giving the highest
 * speed usable between each pair of nodes.  Derived from the SelfID
 * stream: per-node speed capability and the parent/child topology
 * (port code 0x3 = "connected to child").
 * NOTE(review): declarations of i, j, n, sid and several loop bodies are
 * elided from this view.
 */
226 static void build_speed_map(struct hpsb_host
*host
, int nodecount
)
/* variable-length arrays sized by the node count of this generation */
228 char speedcap
[nodecount
];
229 char cldcnt
[nodecount
];
230 u8
*map
= host
->speed_map
;
232 struct ext_selfid
*esid
;
/* start with the maximum: every pair assumed SPEED_400 */
235 for (i
= 0; i
< (nodecount
* 64); i
+= 64) {
236 for (j
= 0; j
< nodecount
; j
++) {
237 map
[i
+j
] = SPEED_400
;
287 241 for (i
= 0; i
< nodecount
; i
++) {
245 /* find direct children count and speed */
/* walk the SelfID stream backwards (highest phy_id first) */
246 for (sid
= (struct selfid
*)&host
->topology_map
[host
->selfid_count
-1],
248 (void *)sid
>= (void *)host
->topology_map
; sid
--) {
250 esid
= (struct ext_selfid
*)sid
;
/* port code 0x3 = connected to a child node */
252 if (esid
->porta
== 0x3) cldcnt
[n
]++;
253 if (esid
->portb
== 0x3) cldcnt
[n
]++;
254 if (esid
->portc
== 0x3) cldcnt
[n
]++;
255 if (esid
->portd
== 0x3) cldcnt
[n
]++;
256 if (esid
->porte
== 0x3) cldcnt
[n
]++;
257 if (esid
->portf
== 0x3) cldcnt
[n
]++;
258 if (esid
->portg
== 0x3) cldcnt
[n
]++;
259 if (esid
->porth
== 0x3) cldcnt
[n
]++;
261 if (sid
->port0
== 0x3) cldcnt
[n
]++;
262 if (sid
->port1
== 0x3) cldcnt
[n
]++;
263 if (sid
->port2
== 0x3) cldcnt
[n
]++;
265 speedcap
[n
] = sid
->speed
;
270 /* set self mapping */
271 for (i
= nodecount
- 1; i
; i
--) {
272 map
[64*i
+ i
] = speedcap
[i
];
275 /* fix up direct children count to total children count;
276 * also fix up speedcaps for sibling and parent communication */
277 for (i
= 1; i
< nodecount
; i
++) {
278 for (j
= cldcnt
[i
], n
= i
- 1; j
> 0; j
--) {
279 cldcnt
[i
] += cldcnt
[n
];
/* a subtree can go no faster than its slowest link on the path */
280 speedcap
[n
] = MIN(speedcap
[n
], speedcap
[i
]);
/* propagate the per-subtree caps into the pairwise map: any pair
 * whose path passes through node n is limited to speedcap[n] */
285 for (n
= 0; n
< nodecount
; n
++) {
286 for (i
= n
- cldcnt
[n
]; i
<= n
; i
++) {
287 for (j
= 0; j
< (n
- cldcnt
[n
]); j
++) {
/* keep the map symmetric: write both [i][j] and [j][i] */
288 map
[j
*64 + i
] = map
[i
*64 + j
] =
289 MIN(map
[i
*64 + j
], speedcap
[n
]);
291 for (j
= n
+ 1; j
< nodecount
; j
++) {
292 map
[j
*64 + i
] = map
[i
*64 + j
] =
293 MIN(map
[i
*64 + j
], speedcap
[n
]);
/*
 * hpsb_selfid_received - lowlevel callback: store one SelfID quadlet.
 * Only accepted while a bus reset is in progress; it is appended to
 * host->topology_map and counted in host->selfid_count.  SelfIDs arriving
 * outside a reset are logged and dropped.
 */
299 void hpsb_selfid_received(struct hpsb_host
*host
, quadlet_t sid
)
301 if (host
->in_bus_reset
) {
302 HPSB_DEBUG("including selfid 0x%x", sid
);
303 host
->topology_map
[host
->selfid_count
++] = sid
;
305 /* FIXME - info on which host */
306 HPSB_NOTICE("spurious selfid packet (0x%8.8x) received", sid
);
/*
 * hpsb_selfid_complete - lowlevel callback: SelfID phase finished.
 * Assigns our node ID, validates the collected SelfIDs (retrying the bus
 * reset up to 20 times on failure), rebuilds the speed map, takes over
 * bus management if we are the IRM, bumps the bus generation, enables
 * cycle mastering when root, and notifies the highlevel drivers.
 */
310 void hpsb_selfid_complete(struct hpsb_host
*host
, int phyid
, int isroot
)
/* node id = bus number bits (0xffc0 = local bus) | physical id */
312 host
->node_id
= 0xffc0 | phyid
;
313 host
->in_bus_reset
= 0;
314 host
->is_root
= isroot
;
316 host
->node_count
= check_selfids(host
, host
->selfid_count
);
/* node_count == 0 means the SelfID stream failed verification */
317 if (!host
->node_count
) {
318 if (host
->reset_retries
++ < 20) {
319 /* selfid stage did not complete without error */
320 HPSB_NOTICE("error in SelfID stage - resetting");
321 hpsb_reset_bus(host
);
/* give up after 20 consecutive failures to avoid a reset storm */
324 HPSB_NOTICE("stopping out-of-control reset loop");
325 HPSB_NOTICE("warning - topology map and speed map will "
326 "therefore not be valid");
329 build_speed_map(host
, host
->node_count
);
332 /* irm_id is kept up to date by check_selfids() */
333 if (host
->irm_id
== host
->node_id
) {
/* we are the IRM: claim bus management for ourselves */
336 host
->busmgr_id
= host
->node_id
;
337 host
->csr
.bus_manager_id
= host
->node_id
;
340 host
->reset_retries
= 0;
/* new bus generation: outstanding packets become stale */
341 inc_hpsb_generation();
342 if (isroot
) host
->template->devctl(host
, ACT_CYCLE_MASTER
, 1);
343 highlevel_host_reset(host
);
/*
 * hpsb_packet_sent - lowlevel callback: the hardware acknowledged (or
 * failed) transmission of @packet.  Records the ack code, then either
 * frees fire-and-forget packets, completes packets that need no response,
 * or queues response-expecting packets on host->pending_packets and arms
 * the timeout task.
 */
347 void hpsb_packet_sent(struct hpsb_host
*host
, struct hpsb_packet
*packet
,
352 packet
->ack_code
= ackcode
;
354 if (packet
->no_waiter
) {
355 /* must not have a tlabel allocated */
356 free_hpsb_packet(packet
);
/* anything but a pending ack on a response-expecting packet means the
 * transaction is over right now */
360 if (ackcode
!= ACK_PENDING
|| !packet
->expect_response
) {
361 packet
->state
= complete
;
/* NOTE(review): state_change is up()ed twice on this path but only
 * once on the pending path below plus once at response time —
 * presumably balancing two down()s by the waiter; confirm against
 * the transaction layer before touching */
362 up(&packet
->state_change
);
363 up(&packet
->state_change
);
364 run_task_queue(&packet
->complete_tq
);
368 packet
->state
= pending
;
/* timestamp for abort_timedouts() split-timeout accounting */
369 packet
->sendtime
= jiffies
;
371 spin_lock_irqsave(&host
->pending_pkt_lock
, flags
);
372 list_add_tail(&packet
->list
, &host
->pending_packets
);
373 spin_unlock_irqrestore(&host
->pending_pkt_lock
, flags
);
375 up(&packet
->state_change
);
/* run abort_timedouts() from the timer task queue */
376 queue_task(&host
->timeout_tq
, &tq_timer
);
380 * hpsb_send_packet - transmit a packet on the bus
381 * @packet: packet to send
383 * The packet is sent through the host specified in the packet->host field.
384 * Before sending, the packet's transmit speed is automatically determined using
385 * the local speed map when it is an async, non-broadcast packet.
387 * Possibilities for failure are that host is either not initialized, in bus
388 * reset, the packet's generation number doesn't match the current generation
389 * number or the host reports a transmit error.
391 * Return value: False (0) on failure, true (1) otherwise.
/*
 * Transmit @packet via packet->host (see kerneldoc above).  Fails when
 * the host is uninitialized, in bus reset, or the packet's generation is
 * stale; otherwise looks up the transmit speed in the speed map for
 * async non-broadcast packets and hands off to the lowlevel driver.
 */
393 int hpsb_send_packet(struct hpsb_packet
*packet
)
395 struct hpsb_host
*host
= packet
->host
;
/* reject stale packets: generation must match the current bus
 * generation (see hpsb_generation above) */
397 if (!host
->initialized
|| host
->in_bus_reset
398 || (packet
->generation
!= get_hpsb_generation())) {
402 packet
->state
= queued
;
/* only async, non-broadcast packets get a per-pair speed lookup */
404 if (packet
->type
== async
&& packet
->node_id
!= ALL_NODES
) {
/* speed_map is the 64x64 matrix built by build_speed_map() */
406 host
->speed_map
[(host
->node_id
& NODE_MASK
) * 64
407 + (packet
->node_id
& NODE_MASK
)];
410 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
411 switch (packet
->speed_code
) {
413 dump_packet("send packet 400:", packet
->header
,
414 packet
->header_size
);
417 dump_packet("send packet 200:", packet
->header
,
418 packet
->header_size
);
421 dump_packet("send packet 100:", packet
->header
,
422 packet
->header_size
);
/* lowlevel driver reports transmit success/failure */
426 return host
->template->transmit_packet(host
, packet
);
/*
 * send_packet_nocare - fire-and-forget transmit: on send failure the
 * packet is freed immediately, so the caller must not touch it afterwards
 * (ownership transfers here in both cases).
 */
429 static void send_packet_nocare(struct hpsb_packet
*packet
)
431 if (!hpsb_send_packet(packet
)) {
432 free_hpsb_packet(packet
);
/*
 * handle_packet_response - match an incoming response against the
 * pending-packets list.  The request is identified by transaction label
 * (bits 10-15 of the first quadlet) and source node id (upper half of the
 * second quadlet); the response tcode must correspond to the request
 * tcode.  On a match the response header/data are copied into the
 * original packet and the waiter is woken; otherwise the response is
 * logged as unsolicited and dropped.
 * NOTE(review): declarations of tlabel/flags/tcode_match and several
 * case labels/breaks are elided from this view.
 */
437 void handle_packet_response(struct hpsb_host
*host
, int tcode
, quadlet_t
*data
,
440 struct hpsb_packet
*packet
= NULL
;
441 struct list_head
*lh
;
/* transaction label lives in bits 10-15 of the first quadlet */
446 tlabel
= (data
[0] >> 10) & 0x3f;
448 spin_lock_irqsave(&host
->pending_pkt_lock
, flags
);
/* linear search of the pending list for (tlabel, source node) */
450 lh
= host
->pending_packets
.next
;
451 while (lh
!= &host
->pending_packets
) {
452 packet
= list_entry(lh
, struct hpsb_packet
, list
);
453 if ((packet
->tlabel
== tlabel
)
454 && (packet
->node_id
== (data
[1] >> 16))){
/* ran off the end: nobody was waiting for this response */
460 if (lh
== &host
->pending_packets
) {
461 HPSB_INFO("unsolicited response packet received - np");
462 dump_packet("contents:", data
, 16);
463 spin_unlock_irqrestore(&host
->pending_pkt_lock
, flags
);
/* verify the response tcode matches the request's tcode */
467 switch (packet
->tcode
) {
470 if (tcode
== TCODE_WRITE_RESPONSE
) tcode_match
= 1;
473 if (tcode
== TCODE_READQ_RESPONSE
) tcode_match
= 1;
476 if (tcode
== TCODE_READB_RESPONSE
) tcode_match
= 1;
478 case TCODE_LOCK_REQUEST
:
479 if (tcode
== TCODE_LOCK_RESPONSE
) tcode_match
= 1;
483 if (!tcode_match
|| (packet
->tlabel
!= tlabel
)
484 || (packet
->node_id
!= (data
[1] >> 16))) {
485 HPSB_INFO("unsolicited response packet received");
486 dump_packet("contents:", data
, 16);
488 spin_unlock_irqrestore(&host
->pending_pkt_lock
, flags
);
/* matched: take it off the pending list before completing */
492 list_del(&packet
->list
);
494 spin_unlock_irqrestore(&host
->pending_pkt_lock
, flags
);
496 /* FIXME - update size fields? */
/* copy response header (and payload where applicable) back into the
 * original packet's buffers */
498 case TCODE_WRITE_RESPONSE
:
499 memcpy(packet
->header
, data
, 12);
501 case TCODE_READQ_RESPONSE
:
502 memcpy(packet
->header
, data
, 16);
504 case TCODE_READB_RESPONSE
:
505 memcpy(packet
->header
, data
, 16);
506 memcpy(packet
->data
, data
+ 4, size
- 16);
508 case TCODE_LOCK_RESPONSE
:
509 memcpy(packet
->header
, data
, 16);
/* lock responses carry at most 8 bytes of payload */
510 memcpy(packet
->data
, data
+ 4, (size
- 16) > 8 ? 8 : size
- 16);
514 packet
->state
= complete
;
515 up(&packet
->state_change
);
516 run_task_queue(&packet
->complete_tq
);
/*
 * create_reply_packet - allocate a response packet for an incoming
 * request.  The payload size is rounded up to a quadlet multiple, and the
 * destination node / transaction label are copied from the request header
 * in @data.  Returns NULL when allocation fails (caller breaks out; see
 * PREP_REPLY_PACKET below).
 */
520 struct hpsb_packet
*create_reply_packet(struct hpsb_host
*host
, quadlet_t
*data
,
523 struct hpsb_packet
*p
;
/* round payload size up to the next multiple of 4 */
525 dsize
+= (dsize
% 4 ? 4 - (dsize
% 4) : 0);
527 p
= alloc_hpsb_packet(dsize
);
529 /* FIXME - send data_error response */
/* reply goes back to the requester: node id from data[1], tlabel from
 * bits 10-15 of data[0] */
536 p
->node_id
= data
[1] >> 16;
537 p
->tlabel
= (data
[0] >> 10) & 0x3f;
/* zero the quadlet just past the rounded payload — relies on the +8
 * slack allocated in alloc_hpsb_packet() */
541 p
->data
[dsize
/ 4] = 0;
/* Allocate the reply packet for the current request, bailing out of the
 * enclosing switch case when allocation fails. */
547 #define PREP_REPLY_PACKET(length) \
548 packet = create_reply_packet(host, data, length); \
549 if (packet == NULL) break
/*
 * handle_incoming_packet - dispatch an incoming asynchronous request
 * (write quadlet/block, read quadlet/block, lock) to the highlevel layer
 * and send the matching response via send_packet_nocare().  Broadcast
 * writes (destination node bits all set) get no reply.  The 48-bit
 * destination offset is assembled from data[1] (low 16 bits) and data[2].
 * NOTE(review): the switch statement, several case labels, breaks and the
 * u64 addr declaration are elided from this view.
 */
551 void handle_incoming_packet(struct hpsb_host
*host
, int tcode
, quadlet_t
*data
,
552 size_t size
, int write_acked
)
554 struct hpsb_packet
*packet
;
555 int length
, rcode
, extcode
;
/* requesting node id: upper half of the second header quadlet */
556 int source
= data
[1] >> 16;
559 /* big FIXME - no error checking is done for an out of bounds length */
/* quadlet write: 48-bit address from data[1]/data[2], payload data[3] */
563 addr
= (((u64
)(data
[1] & 0xffff)) << 32) | data
[2];
564 rcode
= highlevel_write(host
, source
, data
+3, addr
, 4);
/* destination node all-ones means broadcast: no response sent */
567 && ((data
[0] >> 16) & NODE_MASK
) != NODE_MASK
) {
568 /* not a broadcast write, reply */
569 PREP_REPLY_PACKET(0);
570 fill_async_write_resp(packet
, rcode
);
571 send_packet_nocare(packet
);
/* block write: payload starts at data+4 */
576 addr
= (((u64
)(data
[1] & 0xffff)) << 32) | data
[2];
577 rcode
= highlevel_write(host
, source
, data
+4, addr
,
581 && ((data
[0] >> 16) & NODE_MASK
) != NODE_MASK
) {
582 /* not a broadcast write, reply */
583 PREP_REPLY_PACKET(0);
584 fill_async_write_resp(packet
, rcode
);
585 send_packet_nocare(packet
);
/* quadlet read: the single quadlet is read in place into *data */
590 PREP_REPLY_PACKET(0);
592 addr
= (((u64
)(data
[1] & 0xffff)) << 32) | data
[2];
593 rcode
= highlevel_read(host
, source
, data
, addr
, 4);
594 fill_async_readquad_resp(packet
, rcode
, *data
);
595 send_packet_nocare(packet
);
/* block read: requested length in the upper half of data[3] */
599 length
= data
[3] >> 16;
600 PREP_REPLY_PACKET(length
);
602 addr
= (((u64
)(data
[1] & 0xffff)) << 32) | data
[2];
603 rcode
= highlevel_read(host
, source
, packet
->data
, addr
,
605 fill_async_readblock_resp(packet
, rcode
, length
);
606 send_packet_nocare(packet
);
609 case TCODE_LOCK_REQUEST
:
/* lock: length and extended tcode packed into data[3] */
610 length
= data
[3] >> 16;
611 extcode
= data
[3] & 0xffff;
612 addr
= (((u64
)(data
[1] & 0xffff)) << 32) | data
[2];
614 PREP_REPLY_PACKET(8);
/* valid extended tcodes are 1..6 */
616 if ((extcode
== 0) || (extcode
>= 7)) {
617 /* let switch default handle error */
/* 4-byte lock with a single operand (e.g. fetch-add style) */
623 rcode
= highlevel_lock(host
, source
, packet
->data
, addr
,
624 data
[4], 0, extcode
);
625 fill_async_lock_resp(packet
, rcode
, extcode
, 4);
628 if ((extcode
!= EXTCODE_FETCH_ADD
)
629 && (extcode
!= EXTCODE_LITTLE_ADD
)) {
630 rcode
= highlevel_lock(host
, source
,
634 fill_async_lock_resp(packet
, rcode
, extcode
, 4);
/* 8-byte single-operand lock */
636 rcode
= highlevel_lock64(host
, source
,
637 (octlet_t
*)packet
->data
, addr
,
638 *(octlet_t
*)(data
+ 4), 0ULL,
640 fill_async_lock_resp(packet
, rcode
, extcode
, 8);
/* 8-byte two-operand lock (arg at data+6, value at data+4) */
644 rcode
= highlevel_lock64(host
, source
,
645 (octlet_t
*)packet
->data
, addr
,
646 *(octlet_t
*)(data
+ 6),
647 *(octlet_t
*)(data
+ 4),
649 fill_async_lock_resp(packet
, rcode
, extcode
, 8);
/* malformed lock request: answer with a type error */
652 fill_async_lock_resp(packet
, RCODE_TYPE_ERROR
,
656 send_packet_nocare(packet
);
661 #undef PREP_REPLY_PACKET
/*
 * hpsb_packet_received - top-level receive entry point for the lowlevel
 * drivers.  Drops packets during bus reset, extracts the transaction code
 * from bits 4-7 of the first quadlet, and dispatches: responses go to
 * handle_packet_response(), requests to handle_incoming_packet(), iso
 * data to the highlevel layer; cycle-start packets are ignored and
 * unknown tcodes are logged.
 */
664 void hpsb_packet_received(struct hpsb_host
*host
, quadlet_t
*data
, size_t size
,
669 if (host
->in_bus_reset
) {
670 HPSB_INFO("received packet during reset; ignoring");
674 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
675 dump_packet("received packet:", data
, size
);
/* tcode lives in bits 4-7 of the first quadlet */
678 tcode
= (data
[0] >> 4) & 0xf;
681 case TCODE_WRITE_RESPONSE
:
682 case TCODE_READQ_RESPONSE
:
683 case TCODE_READB_RESPONSE
:
684 case TCODE_LOCK_RESPONSE
:
685 handle_packet_response(host
, tcode
, data
, size
);
692 case TCODE_LOCK_REQUEST
:
693 handle_incoming_packet(host
, tcode
, data
, size
, write_acked
);
698 highlevel_iso_receive(host
, data
, size
);
701 case TCODE_CYCLE_START
:
702 /* simply ignore this packet if it is passed on */
706 HPSB_NOTICE("received packet with bogus transaction code %d",
/*
 * abort_requests - cancel every outstanding request on @host (called on
 * bus reset, see hpsb_bus_reset()).  Tells the lowlevel driver to cancel,
 * then moves the whole pending list onto a local list under the lock so
 * the packets can be completed with ACKX_ABORTED without holding it.
 * NOTE(review): declarations of flags/llist and the loop-advance line are
 * elided from this view.
 */
713 void abort_requests(struct hpsb_host
*host
)
716 struct hpsb_packet
*packet
;
717 struct list_head
*lh
;
/* stop the hardware from sending anything still queued */
720 host
->template->devctl(host
, CANCEL_REQUESTS
, 0);
722 spin_lock_irqsave(&host
->pending_pkt_lock
, flags
);
/* steal the entire pending list; complete entries outside the lock */
723 list_splice(&host
->pending_packets
, &llist
);
724 INIT_LIST_HEAD(&host
->pending_packets
);
725 spin_unlock_irqrestore(&host
->pending_pkt_lock
, flags
);
729 while (lh
!= &llist
) {
730 packet
= list_entry(lh
, struct hpsb_packet
, list
);
732 packet
->state
= complete
;
733 packet
->ack_code
= ACKX_ABORTED
;
734 up(&packet
->state_change
);
735 run_task_queue(&packet
->complete_tq
);
/*
 * abort_timedouts - complete pending packets whose split-transaction
 * timeout has expired (run from host->timeout_tq, armed in
 * hpsb_packet_sent()).  The timeout is derived from the CSR
 * SPLIT_TIMEOUT registers; expired packets are moved to a local list
 * under the lock and then completed with ACKX_TIMEOUT.  Re-arms itself
 * while packets remain pending.
 * NOTE(review): the flags declaration, the jiffies conversion of
 * 'expire' and the loop-advance lines are elided from this view.
 */
739 void abort_timedouts(struct hpsb_host
*host
)
742 struct hpsb_packet
*packet
;
743 unsigned long expire
;
744 struct list_head
*lh
;
745 LIST_HEAD(expiredlist
);
/* read the split timeout from the CSR registers under the CSR lock;
 * units conversion happens on elided lines below */
747 spin_lock_irqsave(&host
->csr
.lock
, flags
);
748 expire
= (host
->csr
.split_timeout_hi
* 8000
749 + (host
->csr
.split_timeout_lo
>> 19))
751 /* Avoid shortening of timeout due to rounding errors: */
753 spin_unlock_irqrestore(&host
->csr
.lock
, flags
);
756 spin_lock_irqsave(&host
->pending_pkt_lock
, flags
);
757 lh
= host
->pending_packets
.next
;
/* collect expired packets; completion happens outside the lock */
759 while (lh
!= &host
->pending_packets
) {
760 packet
= list_entry(lh
, struct hpsb_packet
, list
);
/* sendtime was stamped in hpsb_packet_sent() */
762 if (time_before(packet
->sendtime
+ expire
, jiffies
)) {
763 list_del(&packet
->list
);
764 list_add(&packet
->list
, &expiredlist
);
/* still-pending packets: schedule another timeout pass */
768 if (!list_empty(&host
->pending_packets
)) {
769 queue_task(&host
->timeout_tq
, &tq_timer
);
771 spin_unlock_irqrestore(&host
->pending_pkt_lock
, flags
);
773 lh
= expiredlist
.next
;
774 while (lh
!= &expiredlist
) {
775 packet
= list_entry(lh
, struct hpsb_packet
, list
);
777 packet
->state
= complete
;
778 packet
->ack_code
= ACKX_TIMEOUT
;
779 up(&packet
->state_change
);
780 run_task_queue(&packet
->complete_tq
);
/*
 * ieee1394_init - built-in (non-module) initialization: register the
 * compiled-in lowlevel drivers and set up the highlevel and GUID layers.
 * NOTE(review): the function's closing lines are elided from this view.
 */
787 void __init
ieee1394_init(void)
789 register_builtin_lowlevels();
790 init_hpsb_highlevel();
792 init_ieee1394_guid();
797 int init_module(void)
799 init_hpsb_highlevel();
801 init_ieee1394_guid();