 * Core support: hpsb_packet management, packet handling and forwarding to
 * highlevel or lowlevel code
 *
 * Copyright (C) 1999, 2000 Andreas E. Bombe
 *
 * This code is licensed under the GPL. See the file COPYING in the root
 * directory of the kernel sources for details.
13 #include <linux/config.h>
14 #include <linux/kernel.h>
15 #include <linux/list.h>
16 #include <linux/string.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <linux/interrupt.h>
20 #include <asm/bitops.h>
21 #include <asm/byteorder.h>
22 #include <asm/semaphore.h>
24 #include "ieee1394_types.h"
27 #include "ieee1394_core.h"
28 #include "highlevel.h"
29 #include "ieee1394_transactions.h"
/* Current bus generation counter.  Read via get_hpsb_generation() when
 * packets are allocated/sent and bumped via inc_hpsb_generation() at the
 * end of the self-ID stage (see hpsb_selfid_complete()), so packets that
 * predate a bus reset can be detected and refused. */
atomic_t hpsb_generation = ATOMIC_INIT(0);
/*
 * Debug helper: print @text followed by at most the first four quadlets
 * of @data to the kernel log.  Used by the CONFIG_IEEE1394_VERBOSEDEBUG
 * paths in hpsb_send_packet() / hpsb_packet_received() and for dumping
 * unsolicited responses in handle_packet_response().
 * NOTE(review): declaration of i and the closing statements fall on
 * lines elided from this excerpt; the code below is unchanged.
 */
static void dump_packet(const char *text, quadlet_t *data, int size)
        /* Never print more than four quadlets per log line. */
        size = (size > 4 ? 4 : size);

        printk(KERN_DEBUG "ieee1394: %s", text);

        for (i = 0; i < size; i++) {
                printk(" %8.8x", data[i]);
/**
 * alloc_hpsb_packet - allocate new packet structure
 * @data_size: size of the data block to be allocated
 *
 * This function allocates, initializes and returns a new &struct hpsb_packet.
 * It can be used in interrupt context. A header block is always included, its
 * size is big enough to contain all possible 1394 headers. The data block is
 * only allocated when @data_size is not zero.
 *
 * For packets for which responses will be received the @data_size has to be big
 * enough to contain the response's data block since no further allocation
 * occurs at response matching time.
 *
 * The packet's generation value will be set to the current generation number
 * for ease of use. Remember to overwrite it with your own recorded generation
 * number if you can not be sure that your code will not race with a bus reset.
 *
 * Return value: A pointer to a &struct hpsb_packet or NULL on allocation
 * failure.
 */
struct hpsb_packet *alloc_hpsb_packet(size_t data_size)
        struct hpsb_packet *packet = NULL;
        void *header = NULL, *data = NULL;
        /* Interrupt-safe: use a non-sleeping allocation when called from
         * interrupt context (see kernel-doc above). */
        int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

        packet = kmalloc(sizeof(struct hpsb_packet), kmflags);
        /* 5 quadlets (20 bytes) — enough for the largest 1394 header. */
        header = kmalloc(5 * 4, kmflags);
        if (header == NULL || packet == NULL) {
        memset(packet, 0, sizeof(struct hpsb_packet));
        packet->header = header;
        /* 8 slack bytes past @data_size — presumably padding room for
         * response data copies (cf. create_reply_packet() which zeroes
         * the quadlet just past the payload); TODO confirm. */
        data = kmalloc(data_size + 8, kmflags);
        packet->data_size = data_size;
        INIT_LIST_HEAD(&packet->list);
        /* Starts at 0; up()ed on state transitions so callers can block
         * waiting for the packet to progress. */
        sema_init(&packet->state_change, 0);
        packet->state = unused;
        /* Pre-set to the current bus generation for convenience. */
        packet->generation = get_hpsb_generation();
/**
 * free_hpsb_packet - free packet and data associated with it
 * @packet: packet to free (is NULL safe)
 *
 * This function will free packet->data, packet->header and finally the packet
 * itself.
 */
void free_hpsb_packet(struct hpsb_packet *packet)
        /* NULL-safe: a NULL packet is silently ignored. */
        if (packet == NULL) {
        kfree(packet->header);
/*
 * Reset the bus on @host: run the software side of the reset
 * (hpsb_bus_reset()) and then ask the lowlevel driver to perform the
 * actual bus reset through its devctl() hook.
 */
void reset_host_bus(struct hpsb_host *host)
        /* Nothing to do for a host that was never brought up. */
        if (!host->initialized) {

        hpsb_bus_reset(host);
        host->template->devctl(host, RESET_BUS, 0);
/*
 * Software side of a bus reset: abort all pending requests and clear the
 * per-bus bookkeeping so the following self-ID stage starts clean.
 * Logs a notice if a reset is already in progress.
 */
void hpsb_bus_reset(struct hpsb_host *host)
        if (!host->in_bus_reset) {
                abort_requests(host);
                host->in_bus_reset = 1;
                /* No bus manager and no known nodes until the self-ID
                 * stage completes (see hpsb_selfid_complete()). */
                host->busmgr_id = -1;
                host->node_count = 0;
                host->selfid_count = 0;
        /* (else branch — a reset is already running; the brace falls on
         * an elided line) */
        HPSB_NOTICE(__FUNCTION__
                    " called while bus reset already in progress");
/*
 * Verify num_of_selfids SelfIDs and return number of nodes. Return zero in
 * case verification failed.
 *
 * Walks host->topology_map checking that physical IDs follow the running
 * node count and that extended SelfIDs carry consecutive sequence numbers.
 * Side effect: records the last linked contender in host->irm_id (the
 * isochronous resource manager candidate).
 */
static int check_selfids(struct hpsb_host *host, unsigned int num_of_selfids)
        int rest_of_selfids = num_of_selfids;
        struct selfid *sid = (struct selfid *)host->topology_map;
        struct ext_selfid *esid;

        while (rest_of_selfids--) {
                if (!sid->extended) {
                        /* Plain SelfID: phy_id must match the running
                         * node counter (monotony check). */
                        if (sid->phy_id != nodeid) {
                                HPSB_INFO("SelfIDs failed monotony check with "
                        /* A contender with an active link is an IRM
                         * candidate. */
                        if (sid->contender && sid->link_active) {
                                host->irm_id = LOCAL_BUS | sid->phy_id;
                /* Extended SelfID: same phy_id, expected seq_nr. */
                esid = (struct ext_selfid *)sid;
                if ((esid->phy_id != nodeid)
                    || (esid->seq_nr != esid_seq)) {
                        HPSB_INFO("SelfIDs failed monotony check with "
                                  "%d/%d", esid->phy_id, esid->seq_nr);

        /* Root check on the last SelfID (the root's): none of its ports
         * may report status 0x2 — presumably "connected to parent", which
         * a root cannot have; TODO confirm against the IEEE 1394 self-ID
         * port encoding. */
        esid = (struct ext_selfid *)(sid - 1);
        while (esid->extended) {
                if ((esid->porta == 0x2) || (esid->portb == 0x2)
                    || (esid->portc == 0x2) || (esid->portd == 0x2)
                    || (esid->porte == 0x2) || (esid->portf == 0x2)
                    || (esid->portg == 0x2) || (esid->porth == 0x2)) {
                        HPSB_INFO("SelfIDs failed root check on "

        sid = (struct selfid *)esid;
        if ((sid->port0 == 0x2) || (sid->port1 == 0x2) || (sid->port2 == 0x2)) {
                HPSB_INFO("SelfIDs failed root check");
/*
 * Build the pairwise speed map for @host from the collected SelfIDs.
 * map[a*64 + b] ends up holding the maximum usable transfer speed between
 * nodes a and b — the minimum speed capability along the tree path that
 * connects them.  Uses on-stack VLAs sized by @nodecount (bounded by the
 * 63-node bus limit implied by the 64-entry-wide rows).
 * NOTE(review): several declarations (i, j, n, sid) and closing braces
 * fall on lines elided from this excerpt; code below is unchanged.
 */
static void build_speed_map(struct hpsb_host *host, int nodecount)
        char speedcap[nodecount];
        char cldcnt[nodecount];
        u8 *map = host->speed_map;
        struct ext_selfid *esid;

        /* Initialize every pair to the maximum; the passes below only
         * ever lower entries via MIN(). */
        for (i = 0; i < (nodecount * 64); i += 64) {
                for (j = 0; j < nodecount; j++) {
                        map[i+j] = SPEED_400;

        for (i = 0; i < nodecount; i++) {

        /* find direct children count and speed */
        /* Walk the SelfIDs backwards from the end of topology_map (the
         * root's SelfID is last — cf. the root check in check_selfids());
         * port status 0x3 marks a connection to a child. */
        for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
             (void *)sid >= (void *)host->topology_map; sid--) {
                esid = (struct ext_selfid *)sid;
                if (esid->porta == 0x3) cldcnt[n]++;
                if (esid->portb == 0x3) cldcnt[n]++;
                if (esid->portc == 0x3) cldcnt[n]++;
                if (esid->portd == 0x3) cldcnt[n]++;
                if (esid->porte == 0x3) cldcnt[n]++;
                if (esid->portf == 0x3) cldcnt[n]++;
                if (esid->portg == 0x3) cldcnt[n]++;
                if (esid->porth == 0x3) cldcnt[n]++;

                if (sid->port0 == 0x3) cldcnt[n]++;
                if (sid->port1 == 0x3) cldcnt[n]++;
                if (sid->port2 == 0x3) cldcnt[n]++;

                /* Node's own maximum speed from its (primary) SelfID. */
                speedcap[n] = sid->speed;

        /* set self mapping */
        for (i = nodecount - 1; i; i--) {
                map[64*i + i] = speedcap[i];

        /* fix up direct children count to total children count;
         * also fix up speedcaps for sibling and parent communication */
        for (i = 1; i < nodecount; i++) {
                for (j = cldcnt[i], n = i - 1; j > 0; j--) {
                        cldcnt[i] += cldcnt[n];
                        /* A subtree can never talk faster than the node
                         * that connects it upward allows. */
                        speedcap[n] = MIN(speedcap[n], speedcap[i]);

        /* Propagate each node's speed cap to all pairs whose path crosses
         * it: pairs of (subtree of n) x (nodes outside the subtree). */
        for (n = 0; n < nodecount; n++) {
                for (i = n - cldcnt[n]; i <= n; i++) {
                        for (j = 0; j < (n - cldcnt[n]); j++) {
                                map[j*64 + i] = map[i*64 + j] =
                                        MIN(map[i*64 + j], speedcap[n]);
                        for (j = n + 1; j < nodecount; j++) {
                                map[j*64 + i] = map[i*64 + j] =
                                        MIN(map[i*64 + j], speedcap[n]);
/*
 * Record one received SelfID quadlet in host->topology_map while a bus
 * reset is in progress.  SelfIDs arriving outside of a reset are only
 * logged and otherwise dropped.
 */
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
        if (host->in_bus_reset) {
                HPSB_DEBUG("including selfid 0x%x", sid);
                host->topology_map[host->selfid_count++] = sid;
        /* (else branch — not in reset:) */
        /* FIXME - info on which host */
        HPSB_NOTICE("spurious selfid packet (0x%8.8x) received", sid);
/*
 * End of the self-ID stage: adopt the new node ID, verify the collected
 * SelfIDs (forcing another reset on error, giving up after 20 tries),
 * rebuild the speed map, take over bus management if we are the IRM,
 * and finally bump the bus generation and notify highlevel drivers.
 */
void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
        /* Node ID = local bus prefix (0xffc0) | physical ID. */
        host->node_id = 0xffc0 | phyid;
        host->in_bus_reset = 0;
        host->is_root = isroot;

        host->node_count = check_selfids(host, host->selfid_count);
        if (!host->node_count) {
                /* SelfID verification failed — retry via a fresh bus
                 * reset, but cap the retries to break reset loops. */
                if (host->reset_retries++ < 20) {
                        /* selfid stage did not complete without error */
                        HPSB_NOTICE("error in SelfID stage - resetting");
                        reset_host_bus(host);
                HPSB_NOTICE("stopping out-of-control reset loop");
                HPSB_NOTICE("warning - topology map and speed map will "
                            "therefore not be valid");

        build_speed_map(host, host->node_count);

        /* irm_id is kept up to date by check_selfids() */
        if (host->irm_id == host->node_id) {
                /* We are the IRM — assume bus manager duties. */
                host->busmgr_id = host->node_id;
                host->csr.bus_manager_id = host->node_id;

        host->reset_retries = 0;
        /* New generation: packets allocated before the reset now fail the
         * generation check in hpsb_send_packet(). */
        inc_hpsb_generation();
        highlevel_host_reset(host);
/*
 * Transmit-complete callback from the lowlevel driver: record the ack
 * code for @packet and either finish it right away (no waiter, or no
 * response expected / send failed) or put it on host->pending_packets
 * to await its response, arming the timeout task.
 */
void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
                      /* ...ackcode (rest of parameter list on an elided
                       * line) */
        packet->ack_code = ackcode;

        if (packet->no_waiter) {
                /* must not have a tlabel allocated */
                free_hpsb_packet(packet);

        if (ackcode != ACK_PENDING || !packet->expect_response) {
                packet->state = complete;
                /* Two up()s — presumably one for the "sent" wait and one
                 * for the "response" wait so a caller blocking twice on
                 * state_change is fully released; TODO confirm against
                 * the waiting side. */
                up(&packet->state_change);
                up(&packet->state_change);
                run_task_queue(&packet->complete_tq);

        packet->state = pending;
        /* Timestamp for the split-transaction timeout check in
         * abort_timedouts(). */
        packet->sendtime = jiffies;

        spin_lock_irqsave(&host->pending_pkt_lock, flags);
        list_add_tail(&packet->list, &host->pending_packets);
        spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        up(&packet->state_change);
        /* Arm the timeout handling on the timer task queue (re-armed by
         * abort_timedouts() while packets stay pending). */
        queue_task(&host->timeout_tq, &tq_timer);
/**
 * hpsb_send_packet - transmit a packet on the bus
 * @packet: packet to send
 *
 * The packet is sent through the host specified in the packet->host field.
 * Before sending, the packet's transmit speed is automatically determined using
 * the local speed map.
 *
 * Possibilities for failure are that host is either not initialized, in bus
 * reset, the packet's generation number doesn't match the current generation
 * number or the host reports a transmit error.
 *
 * Return value: False (0) on failure, true (1) otherwise.
 */
int hpsb_send_packet(struct hpsb_packet *packet)
        struct hpsb_host *host = packet->host;

        /* Refuse stale packets: host down, bus resetting, or the packet
         * predates the last bus reset (generation mismatch). */
        if (!host->initialized || host->in_bus_reset
            || (packet->generation != get_hpsb_generation())) {

        packet->state = queued;
        /* Path speed between us and the destination, from the map built
         * by build_speed_map(). */
        packet->speed_code = host->speed_map[(host->node_id & NODE_MASK) * 64
                                             + (packet->node_id & NODE_MASK)];

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
        /* (case labels for the speed codes fall on elided lines) */
        switch (packet->speed_code) {
                dump_packet("send packet 400:", packet->header,
                            packet->header_size);
                dump_packet("send packet 200:", packet->header,
                            packet->header_size);
                dump_packet("send packet 100:", packet->header,
                            packet->header_size);

        /* Hand off to the lowlevel driver; its return value is ours. */
        return host->template->transmit_packet(host, packet);
/*
 * Fire-and-forget transmit: if the send fails the packet is freed right
 * here, since no caller will ever wait on it or free it later.
 */
static void send_packet_nocare(struct hpsb_packet *packet)
        if (!hpsb_send_packet(packet)) {
                free_hpsb_packet(packet);
/*
 * Match an incoming response against host->pending_packets by transaction
 * label and source node ID, verify the response tcode fits the original
 * request tcode, copy the response header (plus data for block/lock
 * responses) into the request packet, and complete it.  Unsolicited
 * responses are logged and dropped.
 */
void handle_packet_response(struct hpsb_host *host, int tcode, quadlet_t *data,
                            /* ...size (rest of parameter list on an
                             * elided line) */
        struct hpsb_packet *packet = NULL;
        struct list_head *lh;

        /* Transaction label: bits 10..15 of the first header quadlet. */
        tlabel = (data[0] >> 10) & 0x3f;

        spin_lock_irqsave(&host->pending_pkt_lock, flags);

        /* Find the pending request with matching tlabel and node. */
        lh = host->pending_packets.next;
        while (lh != &host->pending_packets) {
                packet = list_entry(lh, struct hpsb_packet, list);
                if ((packet->tlabel == tlabel)
                    && (packet->node_id == (data[1] >> 16))){

        if (lh == &host->pending_packets) {
                HPSB_INFO("unsolicited response packet received - np");
                dump_packet("contents:", data, 16);
                spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        /* Check that the response type matches the request type
         * (request case labels fall on elided lines). */
        switch (packet->tcode) {
                if (tcode == TCODE_WRITE_RESPONSE) tcode_match = 1;
                if (tcode == TCODE_READQ_RESPONSE) tcode_match = 1;
                if (tcode == TCODE_READB_RESPONSE) tcode_match = 1;
        case TCODE_LOCK_REQUEST:
                if (tcode == TCODE_LOCK_RESPONSE) tcode_match = 1;

        if (!tcode_match || (packet->tlabel != tlabel)
            || (packet->node_id != (data[1] >> 16))) {
                HPSB_INFO("unsolicited response packet received");
                dump_packet("contents:", data, 16);

                spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        /* Take the matched packet off the pending list before copying,
         * so completion runs without the lock held. */
        list_del(&packet->list);

        spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        /* FIXME - update size fields? */
        /* Copy the response into the request packet's buffers. */
        case TCODE_WRITE_RESPONSE:
                memcpy(packet->header, data, 12);
        case TCODE_READQ_RESPONSE:
                memcpy(packet->header, data, 16);
        case TCODE_READB_RESPONSE:
                memcpy(packet->header, data, 16);
                memcpy(packet->data, data + 4, size - 16);
        case TCODE_LOCK_RESPONSE:
                memcpy(packet->header, data, 16);
                /* Lock responses carry at most 8 bytes of payload. */
                memcpy(packet->data, data + 4, (size - 16) > 8 ? 8 : size - 16);

        packet->state = complete;
        up(&packet->state_change);
        run_task_queue(&packet->complete_tq);
/*
 * Allocate a response packet addressed back to the requester described by
 * the incoming request header quadlets in @data, with room for a payload
 * of dsize bytes (rounded up to quadlet alignment).
 */
struct hpsb_packet *create_reply_packet(struct hpsb_host *host, quadlet_t *data,
                                        /* ...dsize (rest of parameter
                                         * list on an elided line) */
        struct hpsb_packet *p;

        /* Round the payload size up to a whole number of quadlets. */
        dsize += (dsize % 4 ? 4 - (dsize % 4) : 0);

        p = alloc_hpsb_packet(dsize);
        /* FIXME - send data_error response */

        /* Echo destination node and transaction label from the request
         * header so the response can be matched by the requester. */
        p->node_id = data[1] >> 16;
        p->tlabel = (data[0] >> 10) & 0x3f;

        /* Zero the quadlet just past the payload — presumably padding in
         * the +8 slack allocated by alloc_hpsb_packet(); TODO confirm. */
        p->data[dsize / 4] = 0;
/* Allocate a reply packet for the request in @data and, on allocation
 * failure, bail out of the enclosing switch in handle_incoming_packet().
 * Deliberately NOT wrapped in do { } while (0): the `break` must reach
 * the surrounding switch statement, which a do/while wrapper would
 * swallow.  Only safe inside that one switch; #undef'd right after the
 * function. */
#define PREP_REPLY_PACKET(length) \
        packet = create_reply_packet(host, data, length); \
        if (packet == NULL) break
/*
 * Dispatch an incoming request to the highlevel address-space handlers
 * according to @tcode and send the matching response packet back via
 * send_packet_nocare() — except for broadcast writes, which are never
 * answered.  The 48-bit destination offset is reassembled from header
 * quadlets 1 and 2 in every branch.
 * NOTE(review): the switch statement, most case labels, the addr
 * declaration and closing braces fall on elided lines; code below is
 * unchanged.
 */
void handle_incoming_packet(struct hpsb_host *host, int tcode, quadlet_t *data,
                            size_t size, int write_acked)
        struct hpsb_packet *packet;
        int length, rcode, extcode;
        int source = data[1] >> 16;

        /* big FIXME - no error checking is done for an out of bounds length */

        /* Quadlet write: payload is header quadlet 3. */
        addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
        rcode = highlevel_write(host, source, data+3, addr, 4);

        /* Reply unless the destination was the broadcast node ID. */
        && ((data[0] >> 16) & NODE_MASK) != NODE_MASK) {
                /* not a broadcast write, reply */
                PREP_REPLY_PACKET(0);
                fill_async_write_resp(packet, rcode);
                send_packet_nocare(packet);

        /* Block write: payload starts at quadlet 4. */
        addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
        rcode = highlevel_write(host, source, data+4, addr,

        && ((data[0] >> 16) & NODE_MASK) != NODE_MASK) {
                /* not a broadcast write, reply */
                PREP_REPLY_PACKET(0);
                fill_async_write_resp(packet, rcode);
                send_packet_nocare(packet);

        /* Quadlet read: response carries a single quadlet. */
        PREP_REPLY_PACKET(0);

        addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
        rcode = highlevel_read(host, source, data, addr, 4);
        fill_async_readquad_resp(packet, rcode, *data);
        send_packet_nocare(packet);

        /* Block read: requested length in the top half of quadlet 3. */
        length = data[3] >> 16;
        PREP_REPLY_PACKET(length);

        addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
        rcode = highlevel_read(host, source, packet->data, addr,
        fill_async_readblock_resp(packet, rcode, length);
        send_packet_nocare(packet);

        case TCODE_LOCK_REQUEST:
        /* Lock request: extended tcode in the low half of quadlet 3
         * selects the lock operation. */
        length = data[3] >> 16;
        extcode = data[3] & 0xffff;
        addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];

        PREP_REPLY_PACKET(8);

        /* Valid extended tcodes are 1..6; anything else falls through to
         * the type-error response below. */
        if ((extcode == 0) || (extcode >= 7)) {
                /* let switch default handle error */

        /* 32-bit lock, single operand (add-style extcodes). */
        rcode = highlevel_lock(host, source, packet->data, addr,
                               data[4], 0, extcode);
        fill_async_lock_resp(packet, rcode, extcode, 4);

        if ((extcode != EXTCODE_FETCH_ADD)
            && (extcode != EXTCODE_LITTLE_ADD)) {
                /* 32-bit lock, two operands (compare-style extcodes —
                 * full argument list on an elided line). */
                rcode = highlevel_lock(host, source,
                fill_async_lock_resp(packet, rcode, extcode, 4);
        /* 64-bit lock, single operand. */
        rcode = highlevel_lock64(host, source,
                                 (octlet_t *)packet->data, addr,
                                 *(octlet_t *)(data + 4), 0ULL,
        fill_async_lock_resp(packet, rcode, extcode, 8);

        /* 64-bit lock, two operands. */
        rcode = highlevel_lock64(host, source,
                                 (octlet_t *)packet->data, addr,
                                 *(octlet_t *)(data + 6),
                                 *(octlet_t *)(data + 4),
        fill_async_lock_resp(packet, rcode, extcode, 8);

        /* Invalid extended tcode: reply with a type error. */
        fill_async_lock_resp(packet, RCODE_TYPE_ERROR,

        send_packet_nocare(packet);

/* PREP_REPLY_PACKET is only meaningful inside the switch above. */
#undef PREP_REPLY_PACKET
/*
 * Central receive dispatcher: drop packets while the bus is resetting,
 * then route by transaction code — responses to handle_packet_response(),
 * requests to handle_incoming_packet(), isochronous data to the highlevel
 * layer.  Cycle start packets are ignored; unknown tcodes are logged.
 * NOTE(review): the switch statement and several case labels fall on
 * elided lines; code below is unchanged.
 */
void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
                          /* ...write_acked (rest of parameter list on an
                           * elided line) */
        if (host->in_bus_reset) {
                HPSB_INFO("received packet during reset; ignoring");

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
        dump_packet("received packet:", data, size);

        /* Transaction code: bits 4..7 of the first quadlet. */
        tcode = (data[0] >> 4) & 0xf;

        case TCODE_WRITE_RESPONSE:
        case TCODE_READQ_RESPONSE:
        case TCODE_READB_RESPONSE:
        case TCODE_LOCK_RESPONSE:
                handle_packet_response(host, tcode, data, size);

        case TCODE_LOCK_REQUEST:
                handle_incoming_packet(host, tcode, data, size, write_acked);

                highlevel_iso_receive(host, data, size);

        case TCODE_CYCLE_START:
                /* simply ignore this packet if it is passed on */

                HPSB_NOTICE("received packet with bogus transaction code %d",
/*
 * Cancel everything in flight on @host: ask the lowlevel driver to cancel
 * queued transmissions, then complete every packet on
 * host->pending_packets with ACKX_ABORTED.  The pending list is spliced
 * onto a local list under the lock so the completions run with
 * pending_pkt_lock released.
 */
void abort_requests(struct hpsb_host *host)
        struct hpsb_packet *packet;
        struct list_head *lh;

        host->template->devctl(host, CANCEL_REQUESTS, 0);

        spin_lock_irqsave(&host->pending_pkt_lock, flags);
        /* Steal the whole pending list atomically. */
        list_splice(&host->pending_packets, &llist);
        INIT_LIST_HEAD(&host->pending_packets);
        spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        while (lh != &llist) {
                packet = list_entry(lh, struct hpsb_packet, list);

                packet->state = complete;
                packet->ack_code = ACKX_ABORTED;
                up(&packet->state_change);
                run_task_queue(&packet->complete_tq);
/*
 * Timer task: derive the split-transaction timeout from the CSR
 * SPLIT_TIMEOUT registers, move every pending packet sent longer ago than
 * that onto a local expired list, re-arm the timer while packets remain
 * pending, and finally complete the expired packets with ACKX_TIMEOUT.
 */
void abort_timedouts(struct hpsb_host *host)
        struct hpsb_packet *packet;
        unsigned long expire;
        struct list_head *lh;
        LIST_HEAD(expiredlist);

        spin_lock_irqsave(&host->csr.lock, flags);
        /* Timeout in jiffies from the CSR split-timeout registers —
         * NOTE(review): part of the scaling expression falls on elided
         * lines; confirm units before relying on this. */
        expire = (host->csr.split_timeout_hi * 8000
                  + (host->csr.split_timeout_lo >> 19))
        /* Avoid shortening of timeout due to rounding errors: */
        spin_unlock_irqrestore(&host->csr.lock, flags);

        spin_lock_irqsave(&host->pending_pkt_lock, flags);
        lh = host->pending_packets.next;

        while (lh != &host->pending_packets) {
                packet = list_entry(lh, struct hpsb_packet, list);
                /* Expired? Move it to the local list for completion
                 * outside the lock. */
                if (time_before(packet->sendtime + expire, jiffies)) {
                        list_del(&packet->list);
                        list_add(&packet->list, &expiredlist);

        /* Keep the timer running while packets are still pending. */
        if (!list_empty(&host->pending_packets)) {
                queue_task(&host->timeout_tq, &tq_timer);
        spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        lh = expiredlist.next;
        while (lh != &expiredlist) {
                packet = list_entry(lh, struct hpsb_packet, list);

                packet->state = complete;
                packet->ack_code = ACKX_TIMEOUT;
                up(&packet->state_change);
                run_task_queue(&packet->complete_tq);
/*
 * Subsystem initialization for the built-in (non-modular) case: register
 * compiled-in lowlevel drivers, then bring up the highlevel layer and the
 * GUID facility.
 */
void __init ieee1394_init(void)
        register_builtin_lowlevels();
        init_hpsb_highlevel();

        init_ieee1394_guid();
785 int init_module(void)
787 init_hpsb_highlevel();
789 init_ieee1394_guid();