/*
 * CTC / ESCON network driver
 *
 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
 *            Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *            Peter Tiedemann (ptiedem@de.ibm.com)
 * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Documentation used:
 *  - Principles of Operation (IBM doc#: SA22-7201-06)
 *  - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
 *  - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
 *  - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
 *  - ESCON I/O Interface (IBM doc#: SA22-7202-029
 *
 * and the source of the original CTC driver by:
 *  Dieter Wellerdiek (wel@de.ibm.com)
 *  Martin Schwidefsky (schwidefsky@de.ibm.com)
 *  Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *  Jochen Röhrig (roehrig@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <asm/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <asm/uaccess.h>

#include <asm/idals.h>

#include "fsm.h"
#include "cu3088.h"

#include "ctcdbug.h"
#include "ctcmain.h"

MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
MODULE_LICENSE("GPL");

/**
 * States of the interface statemachine.
 */
enum dev_states {
	DEV_STATE_STOPPED,
	DEV_STATE_STARTWAIT_RXTX,
	DEV_STATE_STARTWAIT_RX,
	DEV_STATE_STARTWAIT_TX,
	DEV_STATE_STOPWAIT_RXTX,
	DEV_STATE_STOPWAIT_RX,
	DEV_STATE_STOPWAIT_TX,
	DEV_STATE_RUNNING,
	/**
	 * MUST always be the last element!
	 */
	CTC_NR_DEV_STATES
};

static const char *dev_state_names[] = {
	"Stopped",
	"StartWait RXTX",
	"StartWait RX",
	"StartWait TX",
	"StopWait RXTX",
	"StopWait RX",
	"StopWait TX",
	"Running",
};

/**
 * Events of the interface statemachine.
 */
enum dev_events {
	DEV_EVENT_START,
	DEV_EVENT_STOP,
	DEV_EVENT_RXUP,
	DEV_EVENT_TXUP,
	DEV_EVENT_RXDOWN,
	DEV_EVENT_TXDOWN,
	DEV_EVENT_RESTART,
	/**
	 * MUST always be the last element!
	 */
	CTC_NR_DEV_EVENTS
};

static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"RX up",
	"TX up",
	"RX down",
	"TX down",
	"Restart",
};

/**
 * Events of the channel statemachine
 */
enum ch_events {
	/**
	 * Events, representing return code of
	 * I/O operations (ccw_device_start, ccw_device_halt et al.)
	 */
	CH_EVENT_IO_SUCCESS,
	CH_EVENT_IO_EBUSY,
	CH_EVENT_IO_ENODEV,
	CH_EVENT_IO_EIO,
	CH_EVENT_IO_UNKNOWN,

	CH_EVENT_ATTNBUSY,
	CH_EVENT_ATTN,
	CH_EVENT_BUSY,

	/**
	 * Events, representing unit-check
	 */
	CH_EVENT_UC_RCRESET,
	CH_EVENT_UC_RSRESET,
	CH_EVENT_UC_TXTIMEOUT,
	CH_EVENT_UC_TXPARITY,
	CH_EVENT_UC_HWFAIL,
	CH_EVENT_UC_RXPARITY,
	CH_EVENT_UC_ZERO,
	CH_EVENT_UC_UNKNOWN,

	/**
	 * Events, representing subchannel-check
	 */
	CH_EVENT_SC_UNKNOWN,

	/**
	 * Events, representing machine checks
	 */
	CH_EVENT_MC_FAIL,
	CH_EVENT_MC_GOOD,

	/**
	 * Event, representing normal IRQ
	 */
	CH_EVENT_IRQ,
	CH_EVENT_FINSTAT,

	/**
	 * Event, representing timer expiry.
	 */
	CH_EVENT_TIMER,

	/**
	 * Events, representing commands from upper levels.
	 */
	CH_EVENT_START,
	CH_EVENT_STOP,

	/**
	 * MUST always be the last element!
	 */
	NR_CH_EVENTS,
};

/**
 * States of the channel statemachine.
 */
enum ch_states {
	/**
	 * Channel not assigned to any device,
	 * initial state, direction invalid
	 */
	CH_STATE_IDLE,

	/**
	 * Channel assigned but not operating
	 */
	CH_STATE_STOPPED,
	CH_STATE_STARTWAIT,
	CH_STATE_STARTRETRY,
	CH_STATE_SETUPWAIT,
	CH_STATE_RXINIT,
	CH_STATE_TXINIT,
	CH_STATE_RX,
	CH_STATE_TX,
	CH_STATE_RXIDLE,
	CH_STATE_TXIDLE,
	CH_STATE_RXERR,
	CH_STATE_TXERR,
	CH_STATE_TERM,
	CH_STATE_DTERM,
	CH_STATE_NOTOP,

	/**
	 * MUST always be the last element!
	 */
	NR_CH_STATES,
};

static int loglevel = CTC_LOGLEVEL_DEFAULT;

/**
 * Linked list of all detected channels.
 */
static struct channel *channels = NULL;

/**
 * Print Banner.
 */
static void
print_banner(void)
{
	static int printed = 0;

	if (printed)
		return;

	printk(KERN_INFO "CTC driver initialized\n");
	printed = 1;
}

/**
 * Return type of a detected device.
 */
static enum channel_types
get_channel_type(struct ccw_device_id *id)
{
	enum channel_types type = (enum channel_types) id->driver_info;

	if (type == channel_type_ficon)
		type = channel_type_escon;

	return type;
}

static const char *ch_event_names[] = {
	"ccw_device success",
	"ccw_device busy",
	"ccw_device enodev",
	"ccw_device ioerr",
	"ccw_device unknown",

	"Status ATTN & BUSY",
	"Status ATTN",
	"Status BUSY",

	"Unit check remote reset",
	"Unit check remote system reset",
	"Unit check TX timeout",
	"Unit check TX parity",
	"Unit check Hardware failure",
	"Unit check RX parity",
	"Unit check ZERO",
	"Unit check Unknown",

	"SubChannel check Unknown",

	"Machine check failure",
	"Machine check operational",

	"IRQ normal",
	"IRQ final",

	"Timer",

	"Start",
	"Stop",
};

static const char *ch_state_names[] = {
	"Idle",
	"Stopped",
	"StartWait",
	"StartRetry",
	"SetupWait",
	"RX init",
	"TX init",
	"RX",
	"TX",
	"RX idle",
	"TX idle",
	"RX error",
	"TX error",
	"Terminating",
	"Restarting",
	"Not operational",
};

#ifdef DEBUG
/**
 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
 *
 * @param skb    The sk_buff to dump.
 * @param offset Offset relative to skb-data, where to start the dump.
 */
static void
ctc_dump_skb(struct sk_buff *skb, int offset)
{
	unsigned char *p = skb->data;
	__u16 bl;
	struct ll_header *header;
	int i;

	if (!(loglevel & CTC_LOGLEVEL_DEBUG))
		return;
	p += offset;
	bl = *((__u16 *) p);
	p += 2;
	header = (struct ll_header *) p;
	p -= 2;

	printk(KERN_DEBUG "dump:\n");
	printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);

	printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
	       header->length);
	printk(KERN_DEBUG "h->type=%04x\n", header->type);
	printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
	if (bl > 16)
		bl = 16;
	printk(KERN_DEBUG "data: ");
	for (i = 0; i < bl; i++)
		printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
	printk("\n");
}
#else
static inline void
ctc_dump_skb(struct sk_buff *skb, int offset)
{
}
#endif

/**
 * Unpack a just received skb and hand it over to
 * upper layers.
 *
 * @param ch    The channel where this skb has been received.
 * @param pskb  The received skb.
 */
static void
ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
	struct net_device *dev = ch->netdev;
	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
	__u16 len = *((__u16 *) pskb->data);

	DBF_TEXT(trace, 4, __FUNCTION__);
	skb_put(pskb, 2 + LL_HEADER_LENGTH);
	skb_pull(pskb, 2);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_UNNECESSARY;
	while (len > 0) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		skb_pull(pskb, LL_HEADER_LENGTH);
		if ((ch->protocol == CTC_PROTO_S390) &&
		    (header->type != ETH_P_IP)) {

#ifndef DEBUG
			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
#endif
				/**
				 * Check packet type only if we stick strictly
				 * to S/390's protocol of OS390. This only
				 * supports IP. Otherwise allow any packet
				 * type.
				 */
				ctc_pr_warn(
					"%s Illegal packet type 0x%04x received, dropping\n",
					dev->name, header->type);
				ch->logflags |= LOG_FLAG_ILLEGALPKT;
#ifndef DEBUG
			}
#endif
#ifdef DEBUG
			ctc_dump_skb(pskb, -6);
#endif
			privptr->stats.rx_dropped++;
			privptr->stats.rx_frame_errors++;
			return;
		}
		pskb->protocol = ntohs(header->type);
		if (header->length <= LL_HEADER_LENGTH) {
#ifndef DEBUG
			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
#endif
				ctc_pr_warn(
					"%s Illegal packet size %d "
					"received (MTU=%d blocklen=%d), "
					"dropping\n", dev->name, header->length,
					dev->mtu, len);
				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
#ifndef DEBUG
			}
#endif
#ifdef DEBUG
			ctc_dump_skb(pskb, -6);
#endif
			privptr->stats.rx_dropped++;
			privptr->stats.rx_length_errors++;
			return;
		}
		header->length -= LL_HEADER_LENGTH;
		len -= LL_HEADER_LENGTH;
		if ((header->length > skb_tailroom(pskb)) ||
		    (header->length > len)) {
#ifndef DEBUG
			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
#endif
				ctc_pr_warn(
					"%s Illegal packet size %d "
					"(beyond the end of received data), "
					"dropping\n", dev->name, header->length);
				ch->logflags |= LOG_FLAG_OVERRUN;
#ifndef DEBUG
			}
#endif
#ifdef DEBUG
			ctc_dump_skb(pskb, -6);
#endif
			privptr->stats.rx_dropped++;
			privptr->stats.rx_length_errors++;
			return;
		}
		skb_put(pskb, header->length);
		pskb->mac.raw = pskb->data;
		len -= header->length;
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
#ifndef DEBUG
			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
#endif
				ctc_pr_warn(
					"%s Out of memory in ctc_unpack_skb\n",
					dev->name);
				ch->logflags |= LOG_FLAG_NOMEM;
#ifndef DEBUG
			}
#endif
			privptr->stats.rx_dropped++;
			return;
		}
		memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
		skb->mac.raw = skb->data;
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		netif_rx_ni(skb);
		/**
		 * Successful rx; reset logflags
		 */
		ch->logflags = 0;
		dev->last_rx = jiffies;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		if (len > 0) {
			skb_pull(pskb, header->length);
			if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
#ifndef DEBUG
				if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
#endif
					ctc_pr_warn(
						"%s Overrun in ctc_unpack_skb\n",
						dev->name);
					ch->logflags |= LOG_FLAG_OVERRUN;
#ifndef DEBUG
				}
#endif
				return;
			}
			skb_put(pskb, LL_HEADER_LENGTH);
		}
	}
}
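
/*
 * Note (explanatory comment, not part of the original sources): as the
 * loop above shows, a received transfer is framed as a 2-byte overall
 * block length followed by one or more packets, each consisting of an
 * ll_header (length, type, unused) and its payload. ctc_unpack_skb()
 * walks that sequence and hands every payload to the stack as its own
 * sk_buff.
 */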

/**
 * Check return code of a preceding ccw_device call, halt_IO etc...
 *
 * @param ch          The channel, the error belongs to.
 * @param return_code The error code to inspect.
 */
static void
ccw_check_return_code(struct channel *ch, int return_code, char *msg)
{
	DBF_TEXT(trace, 5, __FUNCTION__);
	switch (return_code) {
	case 0:
		fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
		break;
	case -EBUSY:
		ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
		fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
		break;
	case -ENODEV:
		ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
			     ch->id, msg);
		fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
		break;
	case -EIO:
		ctc_pr_emerg("%s (%s): Status pending... \n",
			     ch->id, msg);
		fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
		break;
	default:
		ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
			     ch->id, msg, return_code);
		fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
	}
}

/**
 * Check sense of a unit check.
 *
 * @param ch     The channel, the sense code belongs to.
 * @param sense  The sense code to inspect.
 */
static void
ccw_unit_check(struct channel *ch, unsigned char sense)
{
	DBF_TEXT(trace, 5, __FUNCTION__);
	if (sense & SNS0_INTERVENTION_REQ) {
		if (sense & 0x01) {
			ctc_pr_debug("%s: Interface disc. or Sel. reset "
				     "(remote)\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
		} else {
			ctc_pr_debug("%s: System reset (remote)\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
		}
	} else if (sense & SNS0_EQUIPMENT_CHECK) {
		if (sense & SNS0_BUS_OUT_CHECK) {
			ctc_pr_warn("%s: Hardware malfunction (remote)\n",
				    ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
		} else {
			ctc_pr_warn("%s: Read-data parity error (remote)\n",
				    ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
		}
	} else if (sense & SNS0_BUS_OUT_CHECK) {
		if (sense & 0x04) {
			ctc_pr_warn("%s: Data-streaming timeout\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
		} else {
			ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
			fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
		}
	} else if (sense & SNS0_CMD_REJECT) {
		ctc_pr_warn("%s: Command reject\n", ch->id);
	} else if (sense == 0) {
		ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
		fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
	} else {
		ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
			    ch->id, sense);
		fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
	}
}

static void
ctc_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	DBF_TEXT(trace, 5, __FUNCTION__);

	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
}

static int
ctc_checkalloc_buffer(struct channel *ch, int warn)
{
	DBF_TEXT(trace, 5, __FUNCTION__);
	if ((ch->trans_skb == NULL) ||
	    (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
		if (ch->trans_skb != NULL)
			dev_kfree_skb(ch->trans_skb);
		clear_normalized_cda(&ch->ccw[1]);
		ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
						GFP_ATOMIC | GFP_DMA);
		if (ch->trans_skb == NULL) {
			if (warn)
				ctc_pr_warn(
					"%s: Couldn't alloc %s trans_skb\n",
					ch->id,
					(CHANNEL_DIRECTION(ch->flags) == READ) ?
					"RX" : "TX");
			return -ENOMEM;
		}
		ch->ccw[1].count = ch->max_bufsize;
		if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
			dev_kfree_skb(ch->trans_skb);
			ch->trans_skb = NULL;
			if (warn)
				ctc_pr_warn(
					"%s: set_normalized_cda for %s "
					"trans_skb failed, dropping packets\n",
					ch->id,
					(CHANNEL_DIRECTION(ch->flags) == READ) ?
					"RX" : "TX");
			return -ENOMEM;
		}
		ch->ccw[1].count = 0;
		ch->trans_skb_data = ch->trans_skb->data;
		ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
	}
	return 0;
}
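
/*
 * Note (explanatory comment, not part of the original sources): the
 * transfer buffer above is allocated with GFP_DMA and mapped via
 * set_normalized_cda(), which builds an IDAL for the read/write CCW when
 * the buffer cannot be addressed directly by the channel subsystem.
 */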

/**
 * Dummy NOP action for statemachines
 */
static void
fsm_action_nop(fsm_instance * fi, int event, void *arg)
{
}

/**
 * Actions for channel - statemachines.
 *****************************************************************************/

/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_txdone(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;
	struct ctc_priv *privptr = dev->priv;
	struct sk_buff *skb;
	int first = 1;
	int i;
	unsigned long duration;
	struct timespec done_stamp = xtime;

	DBF_TEXT(trace, 4, __FUNCTION__);

	duration =
	    (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
	    (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.count != 0)
		ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
			     dev->name, ch->irb->scsw.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		privptr->stats.tx_packets++;
		privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
		if (first) {
			privptr->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if (ch->collect_len > 0) {
		int rc;

		if (ctc_checkalloc_buffer(ch, 1)) {
			spin_unlock(&ch->collect_lock);
			return;
		}
		ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
		ch->trans_skb->len = 0;
		if (ch->prof.maxmulti < (ch->collect_len + 2))
			ch->prof.maxmulti = ch->collect_len + 2;
		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
		*((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		i = 0;
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
			       skb->len);
			privptr->stats.tx_packets++;
			privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			i++;
		}
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		ch->ccw[1].count = ch->trans_skb->len;
		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
		ch->prof.send_stamp = xtime;
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
				      (unsigned long) ch, 0xff, 0);
		ch->prof.doios_multi++;
		if (rc != 0) {
			privptr->stats.tx_dropped += i;
			privptr->stats.tx_errors += i;
			fsm_deltimer(&ch->timer);
			ccw_check_return_code(ch, rc, "chained TX");
		}
	} else {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CH_STATE_TXIDLE);
	}
	ctc_clear_busy(dev);
}
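
/*
 * Note (explanatory comment, not part of the original sources): while a
 * write is in flight, the transmit path queues further packets on
 * collect_queue. The completion handler above drains that queue into
 * trans_skb and submits it as one chained write, which is what the
 * doios_multi / maxmulti profiling counters account for.
 */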

/**
 * Initial data is sent.
 * Notify device statemachine that we are up and
 * running.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_txidle(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CH_STATE_TXIDLE);
	fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
		  ch->netdev);
}

/**
 * Got normal data, check for sanity, queue it up, allocate new buffer,
 * trigger bottom half, and initiate next read.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rx(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;
	struct ctc_priv *privptr = dev->priv;
	int len = ch->max_bufsize - ch->irb->scsw.count;
	struct sk_buff *skb = ch->trans_skb;
	__u16 block_len = *((__u16 *) skb->data);
	int check_len;
	int rc;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	if (len < 8) {
		ctc_pr_debug("%s: got packet with length %d < 8\n",
			     dev->name, len);
		privptr->stats.rx_dropped++;
		privptr->stats.rx_length_errors++;
		goto again;
	}
	if (len > ch->max_bufsize) {
		ctc_pr_debug("%s: got packet with length %d > %d\n",
			     dev->name, len, ch->max_bufsize);
		privptr->stats.rx_dropped++;
		privptr->stats.rx_length_errors++;
		goto again;
	}

	/**
	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
	 */
	switch (ch->protocol) {
	case CTC_PROTO_S390:
	case CTC_PROTO_OS390:
		check_len = block_len + 2;
		break;
	default:
		check_len = block_len;
		break;
	}
	if ((len < block_len) || (len > check_len)) {
		ctc_pr_debug("%s: got block length %d != rx length %d\n",
			     dev->name, block_len, len);
#ifdef DEBUG
		ctc_dump_skb(skb, 0);
#endif
		*((__u16 *) skb->data) = len;
		privptr->stats.rx_dropped++;
		privptr->stats.rx_length_errors++;
		goto again;
	}
	block_len -= 2;
	if (block_len > 0) {
		*((__u16 *) skb->data) = block_len;
		ctc_unpack_skb(ch, skb);
	}
again:
	skb->data = skb->tail = ch->trans_skb_data;
	skb->len = 0;
	if (ctc_checkalloc_buffer(ch, 1))
		return;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
	if (rc != 0)
		ccw_check_return_code(ch, rc, "normal RX");
}
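
/*
 * Note (explanatory comment, not part of the original sources): every
 * exit path of ch_action_rx() goes through the "again:" label, so the
 * transfer buffer is reset and the read CCW restarted even when a block
 * is dropped as malformed; the receive channel therefore never stalls on
 * bad input.
 */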

static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);

/**
 * Initialize connection by sending a __u16 of value 0.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_firstio(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	int rc;

	DBF_TEXT(trace, 4, __FUNCTION__);

	if (fsm_getstate(fi) == CH_STATE_TXIDLE)
		ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
	fsm_deltimer(&ch->timer);
	if (ctc_checkalloc_buffer(ch, 1))
		return;
	if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
	    (ch->protocol == CTC_PROTO_OS390)) {
		/* OS/390 resp. z/OS */
		if (CHANNEL_DIRECTION(ch->flags) == READ) {
			*((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
				     CH_EVENT_TIMER, ch);
			ch_action_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			fsm_newstate(fi, CH_STATE_TXIDLE);
			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
				  DEV_EVENT_TXUP, dev);
		}
		return;
	}

	/**
	 * Don't setup a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
	    (ch->protocol != CTC_PROTO_S390))
		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);

	*((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
		     ? CH_STATE_RXINIT : CH_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CH_STATE_SETUPWAIT);
		ccw_check_return_code(ch, rc, "init IO");
	}
	/**
	 * If in compatibility mode, since we don't setup a timer, we
	 * also signal RX channel up immediately. This enables us
	 * to send packets early which in turn usually triggers some
	 * reply from VM TCP which brings up the RX channel to its
	 * final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
	    (ch->protocol == CTC_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
			  dev);
	}
}
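
/*
 * Note (explanatory comment, not part of the original sources, and only
 * a rough summary): the initial handshake above transfers just the
 * 2-byte block length CTC_INITIAL_BLOCKLEN (ccw[1].count = 2) in each
 * direction; once that exchange completes, the channel statemachines
 * move on to RXIDLE/TXIDLE and the device statemachine is notified that
 * the respective direction is up.
 */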

/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rxidle(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;
	__u16 buflen;
	int rc;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	buflen = *((__u16 *) ch->trans_skb->data);
#ifdef DEBUG
	ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
#endif
	if (buflen >= CTC_INITIAL_BLOCKLEN) {
		if (ctc_checkalloc_buffer(ch, 1))
			return;
		ch->ccw[1].count = ch->max_bufsize;
		fsm_newstate(fi, CH_STATE_RXIDLE);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
				      (unsigned long) ch, 0xff, 0);
		if (rc != 0) {
			fsm_newstate(fi, CH_STATE_RXINIT);
			ccw_check_return_code(ch, rc, "initial RX");
		} else
			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
				  DEV_EVENT_RXUP, dev);
	} else {
		ctc_pr_debug("%s: Initial RX count %d not %d\n",
			     dev->name, buflen, CTC_INITIAL_BLOCKLEN);
		ch_action_firstio(fi, event, arg);
	}
}

/**
 * Set channel into extended mode.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_setmode(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	int rc;
	unsigned long saveflags;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
	fsm_newstate(fi, CH_STATE_SETUPWAIT);
	saveflags = 0;	/* avoids compiler warning with
			   spin_unlock_irqrestore */
	if (event == CH_EVENT_TIMER)	// only for timer not yet locked
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
	if (event == CH_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CH_STATE_STARTWAIT);
		ccw_check_return_code(ch, rc, "set Mode");
	} else
		ch->retry = 0;
}

/**
 * Setup channel.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_start(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	unsigned long saveflags;
	int rc;
	struct net_device *dev;

	DBF_TEXT(trace, 4, __FUNCTION__);
	if (ch == NULL) {
		ctc_pr_warn("ch_action_start ch=NULL\n");
		return;
	}
	if (ch->netdev == NULL) {
		ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
		return;
	}
	dev = ch->netdev;

#ifdef DEBUG
	ctc_pr_debug("%s: %s channel start\n", dev->name,
		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
#endif

	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		ch->ccw[1].cmd_code = CCW_CMD_READ;
		ch->ccw[1].flags = CCW_FLAG_SLI;
		ch->ccw[1].count = 0;
	} else {
		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[1].count = 0;
	}
	if (ctc_checkalloc_buffer(ch, 0)) {
		ctc_pr_notice(
			"%s: Could not allocate %s trans_skb, delaying "
			"allocation until first transfer\n",
			dev->name,
			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
	}

	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
	ch->ccw[0].count = 0;
	ch->ccw[0].cda = 0;
	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
	ch->ccw[2].flags = CCW_FLAG_SLI;
	ch->ccw[2].count = 0;
	ch->ccw[2].cda = 0;
	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
	ch->ccw[4].cda = 0;
	ch->ccw[4].flags &= ~CCW_FLAG_IDA;

	fsm_newstate(fi, CH_STATE_STARTWAIT);
	fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY)
			fsm_deltimer(&ch->timer);
		ccw_check_return_code(ch, rc, "initial HaltIO");
	}
#ifdef DEBUG
	ctc_pr_debug("ctc: %s(): leaving\n", __func__);
#endif
}

/**
 * Shutdown a channel.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_haltio(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	unsigned long saveflags;
	int rc;
	int oldstate;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
	saveflags = 0;	/* avoids comp warning with
			   spin_unlock_irqrestore */
	if (event == CH_EVENT_STOP)	// only for STOP not yet locked
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CH_STATE_TERM);
	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
	if (event == CH_EVENT_STOP)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
	}
}

/**
 * A channel has successfully been halted.
 * Clean up its queue and notify interface statemachine.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_stopped(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CH_STATE_STOPPED);
	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_RXDOWN, dev);
	} else {
		ctc_purge_skb_queue(&ch->io_queue);
		spin_lock(&ch->collect_lock);
		ctc_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * A stop command from device statemachine arrived and we are in
 * not operational mode. Set state to stopped.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_stop(fsm_instance * fi, int event, void *arg)
{
	fsm_newstate(fi, CH_STATE_STOPPED);
}

/**
 * A machine check for no path, not operational status or gone device has
 * happened.
 * Cleanup queue and notify interface statemachine.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_fail(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CH_STATE_NOTOP);
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_RXDOWN, dev);
	} else {
		ctc_purge_skb_queue(&ch->io_queue);
		spin_lock(&ch->collect_lock);
		ctc_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * Handle error during setup of channel.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_setuperr(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(setup, 3, __FUNCTION__);
	/**
	 * Special case: Got UC_RCRESET on setmode.
	 * This means that the remote side isn't set up. In this case
	 * simply retry after some 10 secs...
	 */
	if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
	    ((event == CH_EVENT_UC_RCRESET) ||
	     (event == CH_EVENT_UC_RSRESET))) {
		fsm_newstate(fi, CH_STATE_STARTRETRY);
		fsm_deltimer(&ch->timer);
		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
		if (CHANNEL_DIRECTION(ch->flags) == READ) {
			int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
			if (rc != 0)
				ccw_check_return_code(
					ch, rc, "HaltIO in ch_action_setuperr");
		}
		return;
	}

	ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
		     dev->name, ch_event_names[event],
		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
		     fsm_getstate_str(fi));
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		fsm_newstate(fi, CH_STATE_RXERR);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CH_STATE_TXERR);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * Restart a channel after an error.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_restart(fsm_instance * fi, int event, void *arg)
{
	unsigned long saveflags;
	int oldstate;
	int rc;

	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	ctc_pr_debug("%s: %s channel restart\n", dev->name,
		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CH_STATE_STARTWAIT);
	saveflags = 0;	/* avoids compiler warning with
			   spin_unlock_irqrestore */
	if (event == CH_EVENT_TIMER)	// only for timer not yet locked
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
	if (event == CH_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
	}
}

/**
 * Handle error during RX initial handshake (exchange of
 * 0-length block header)
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(setup, 3, __FUNCTION__);
	if (event == CH_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
		if (ch->retry++ < 3)
			ch_action_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CH_STATE_RXERR);
			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
				  DEV_EVENT_RXDOWN, dev);
		}
	} else
		ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
}

/**
 * Notify device statemachine if we gave up initialization
 * of RX channel.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(setup, 3, __FUNCTION__);
	fsm_newstate(fi, CH_STATE_RXERR);
	ctc_pr_warn("%s: RX initialization failed\n", dev->name);
	ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
}

/**
 * Handle RX Unit check remote reset (remote disconnected)
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct channel *ch2;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
		     dev->name);

	/**
	 * Notify device statemachine
	 */
	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);

	fsm_newstate(fi, CH_STATE_DTERM);
	ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
	fsm_newstate(ch2->fsm, CH_STATE_DTERM);

	ccw_device_halt(ch->cdev, (unsigned long) ch);
	ccw_device_halt(ch2->cdev, (unsigned long) ch2);
}

/**
 * Handle error during TX channel initialization.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(setup, 2, __FUNCTION__);
	if (event == CH_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
		if (ch->retry++ < 3)
			ch_action_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CH_STATE_TXERR);
			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
				  DEV_EVENT_TXDOWN, dev);
		}
	} else
		ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
}

/**
 * Handle TX timeout by retrying operation.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_txretry(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;
	unsigned long saveflags;

	DBF_TEXT(trace, 4, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	if (ch->retry++ > 3) {
		ctc_pr_debug("%s: TX retry failed, restarting channel\n",
			     dev->name);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_TXDOWN, dev);
		ch_action_restart(fi, event, arg);
	} else {
		struct sk_buff *skb;

		ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
		if ((skb = skb_peek(&ch->io_queue))) {
			int rc = 0;

			clear_normalized_cda(&ch->ccw[4]);
			ch->ccw[4].count = skb->len;
			if (set_normalized_cda(&ch->ccw[4], skb->data)) {
				ctc_pr_debug(
					"%s: IDAL alloc failed, chan restart\n",
					dev->name);
				fsm_event(((struct ctc_priv *) dev->priv)->fsm,
					  DEV_EVENT_TXDOWN, dev);
				ch_action_restart(fi, event, arg);
				return;
			}
			fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
			saveflags = 0;	/* avoids compiler warning with
					   spin_unlock_irqrestore */
			if (event == CH_EVENT_TIMER)	// only for TIMER not yet locked
				spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
						  saveflags);
			rc = ccw_device_start(ch->cdev, &ch->ccw[3],
					      (unsigned long) ch, 0xff, 0);
			if (event == CH_EVENT_TIMER)
				spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
						       saveflags);
			if (rc != 0) {
				fsm_deltimer(&ch->timer);
				ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
				ctc_purge_skb_queue(&ch->io_queue);
			}
		}
	}
}

/**
 * Handle fatal errors during an I/O command.
 *
 * @param fi    An instance of a channel statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from channel * upon call.
 */
static void
ch_action_iofatal(fsm_instance * fi, int event, void *arg)
{
	struct channel *ch = (struct channel *) arg;
	struct net_device *dev = ch->netdev;

	DBF_TEXT(trace, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		ctc_pr_debug("%s: RX I/O error\n", dev->name);
		fsm_newstate(fi, CH_STATE_RXERR);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_RXDOWN, dev);
	} else {
		ctc_pr_debug("%s: TX I/O error\n", dev->name);
		fsm_newstate(fi, CH_STATE_TXERR);
		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
			  DEV_EVENT_TXDOWN, dev);
	}
}

static void
ch_action_reinit(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = (struct channel *)arg;
	struct net_device *dev = ch->netdev;
	struct ctc_priv *privptr = dev->priv;

	DBF_TEXT(trace, 4, __FUNCTION__);
	ch_action_iofatal(fi, event, arg);
	fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
}

/**
 * The statemachine for a channel.
 */
static const fsm_node ch_fsm[] = {
	{CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
	{CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
	{CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
	{CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },

	{CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
	{CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
	{CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
	{CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
	{CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },

	{CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
	{CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
	{CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
	{CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
	{CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
	{CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
	{CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },

	{CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
	{CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
	{CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
	{CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },

	{CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
	{CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
	{CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
	{CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
	{CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
	{CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
	{CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
	{CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
	{CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },

	{CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
	{CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
	{CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
	{CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
	{CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
	{CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
	{CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
	{CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
	{CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
	{CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
	{CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },

	{CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
	{CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
	{CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
	{CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
//	{CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
	{CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
	{CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
	{CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
	{CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },

	{CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
	{CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
	{CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
	{CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
	{CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
	{CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
	{CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
	{CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
	{CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },

	{CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
	{CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
	{CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
	{CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
	{CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
	{CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
	{CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
	{CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },

	{CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
	{CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
	{CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
	{CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
	{CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
	{CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },

	{CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
	{CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
	{CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
	{CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
	{CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
	{CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },

	{CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
	{CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
	{CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
	{CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
	{CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
	{CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
	{CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
	{CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
	{CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },

	{CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
	{CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
	{CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
	{CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
};

static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);

/**
 * Functions related to setup and device detection.
 *****************************************************************************/

static inline int
less_than(char *id1, char *id2)
{
	int dev1, dev2, i;

	for (i = 0; i < 5; i++) {
		id1++;
		id2++;
	}
	dev1 = simple_strtoul(id1, &id1, 16);
	dev2 = simple_strtoul(id2, &id2, 16);

	return (dev1 < dev2);
}
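
/*
 * Note (explanatory comment, not part of the original sources): the loop
 * above merely skips the first five characters of both ids; the
 * remainder is then parsed as a hexadecimal number by simple_strtoul(),
 * which is how the channel list built in add_channel() is kept ordered
 * by device number.
 */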

/**
 * Add a new channel to the list of channels.
 * Keeps the channel list sorted.
 *
 * @param cdev The ccw_device to be added.
 * @param type The type class of the new channel.
 *
 * @return 0 on success, !0 on error.
 */
static int
add_channel(struct ccw_device *cdev, enum channel_types type)
{
	struct channel **c = &channels;
	struct channel *ch;

	DBF_TEXT(trace, 2, __FUNCTION__);
	if ((ch =
	     (struct channel *) kmalloc(sizeof (struct channel),
					GFP_KERNEL)) == NULL) {
		ctc_pr_warn("ctc: Out of memory in add_channel\n");
		return -1;
	}
	memset(ch, 0, sizeof (struct channel));
	if ((ch->ccw = kmalloc(8*sizeof(struct ccw1),
			       GFP_KERNEL | GFP_DMA)) == NULL) {
		kfree(ch);
		ctc_pr_warn("ctc: Out of memory in add_channel\n");
		return -1;
	}

	memset(ch->ccw, 0, 8*sizeof(struct ccw1));	// assure all flags and counters are reset

	/**
	 * "static" ccws are used in the following way:
	 *
	 * ccw[0..2] (Channel program for generic I/O):
	 *           0: prepare
	 *           1: read or write (depending on direction) with fixed
	 *              buffer (idal allocated once when buffer is allocated)
	 *           2: nop
	 * ccw[3..5] (Channel program for direct write of packets)
	 *           3: prepare
	 *           4: write (idal allocated on every write).
	 *           5: nop
	 * ccw[6..7] (Channel program for initial channel setup):
	 *           6: set extended mode
	 *           7: nop
	 *
	 * ch->ccw[0..5] are initialized in ch_action_start because
	 * the channel's direction is yet unknown here.
	 */
	ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
	ch->ccw[6].flags = CCW_FLAG_SLI;

	ch->ccw[7].cmd_code = CCW_CMD_NOOP;
	ch->ccw[7].flags = CCW_FLAG_SLI;

	ch->cdev = cdev;
	snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
	ch->type = type;
	ch->fsm = init_fsm(ch->id, ch_state_names,
			   ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
			   ch_fsm, CH_FSM_LEN, GFP_KERNEL);
	if (ch->fsm == NULL) {
		ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
		kfree(ch->ccw);
		kfree(ch);
		return -1;
	}
	fsm_newstate(ch->fsm, CH_STATE_IDLE);
	if ((ch->irb = kmalloc(sizeof (struct irb),
			       GFP_KERNEL)) == NULL) {
		ctc_pr_warn("ctc: Out of memory in add_channel\n");
		kfree_fsm(ch->fsm);
		kfree(ch->ccw);
		kfree(ch);
		return -1;
	}
	memset(ch->irb, 0, sizeof (struct irb));
	while (*c && less_than((*c)->id, ch->id))
		c = &(*c)->next;
	if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
		ctc_pr_debug(
			"ctc: add_channel: device %s already in list, "
			"using old entry\n", (*c)->id);
		kfree(ch->irb);
		kfree_fsm(ch->fsm);
		kfree(ch->ccw);
		kfree(ch);
		return 0;
	}

	spin_lock_init(&ch->collect_lock);

	fsm_settimer(ch->fsm, &ch->timer);
	skb_queue_head_init(&ch->io_queue);
	skb_queue_head_init(&ch->collect_queue);
	ch->next = *c;
	*c = ch;
	return 0;
}

/**
 * Release a specific channel in the channel list.
 *
 * @param ch Pointer to channel struct to be released.
 */
static void
channel_free(struct channel *ch)
{
	ch->flags &= ~CHANNEL_FLAGS_INUSE;
	fsm_newstate(ch->fsm, CH_STATE_IDLE);
}

/**
 * Remove a specific channel in the channel list.
 *
 * @param ch Pointer to channel struct to be released.
 */
static void
channel_remove(struct channel *ch)
{
	struct channel **c = &channels;

	DBF_TEXT(trace, 2, __FUNCTION__);
	if (ch == NULL)
		return;

	channel_free(ch);
	while (*c) {
		if (*c == ch) {
			*c = ch->next;
			fsm_deltimer(&ch->timer);
			kfree_fsm(ch->fsm);
			clear_normalized_cda(&ch->ccw[4]);
			if (ch->trans_skb != NULL) {
				clear_normalized_cda(&ch->ccw[1]);
				dev_kfree_skb(ch->trans_skb);
			}
			kfree(ch->ccw);
			kfree(ch->irb);
			kfree(ch);
			return;
		}
		c = &((*c)->next);
	}
}

/**
 * Get a specific channel from the channel list.
 *
 * @param type      Type of channel we are interested in.
 * @param id        Id of channel we are interested in.
 * @param direction Direction we want to use this channel for.
 *
 * @return Pointer to a channel or NULL if no matching channel available.
 */
static struct channel *
channel_get(enum channel_types type, char *id, int direction)
{
	struct channel *ch = channels;

	DBF_TEXT(trace, 3, __FUNCTION__);
#ifdef DEBUG
	ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
		     __func__, id, type);
#endif

	while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
#ifdef DEBUG
		ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d\n",
			     __func__, ch, ch->id, ch->type);
#endif
		ch = ch->next;
	}
#ifdef DEBUG
	ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d\n",
		     __func__, ch, ch->id, ch->type);
#endif
	if (!ch) {
		ctc_pr_warn("ctc: %s(): channel with id %s "
			    "and type %d not found in channel list\n",
			    __func__, id, type);
	} else {
		if (ch->flags & CHANNEL_FLAGS_INUSE)
			ch = NULL;
		else {
			ch->flags |= CHANNEL_FLAGS_INUSE;
			ch->flags &= ~CHANNEL_FLAGS_RWMASK;
			ch->flags |= (direction == WRITE)
			    ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
			fsm_newstate(ch->fsm, CH_STATE_STOPPED);
		}
	}
	return ch;
}

/**
 * Return the channel type by name.
 *
 * @param name Name of network interface.
 *
 * @return Type class of channel to be used for that interface.
 */
static enum channel_types inline
extract_channel_media(char *name)
{
	enum channel_types ret = channel_type_unknown;

	if (name != NULL) {
		if (strncmp(name, "ctc", 3) == 0)
			ret = channel_type_parallel;
		if (strncmp(name, "escon", 5) == 0)
			ret = channel_type_escon;
	}
	return ret;
}

static long
__ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
//		CTC_DBF_TEXT(trace, 2, "ckirberr");
//		CTC_DBF_TEXT_(trace, 2, "  rc%d", -EIO);
		break;
	case -ETIMEDOUT:
		ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
//		CTC_DBF_TEXT(trace, 2, "ckirberr");
//		CTC_DBF_TEXT_(trace, 2, "  rc%d", -ETIMEDOUT);
		break;
	default:
		ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
			    cdev->dev.bus_id);
//		CTC_DBF_TEXT(trace, 2, "ckirberr");
//		CTC_DBF_TEXT(trace, 2, "  rc???");
	}
	return PTR_ERR(irb);
}

/**
 * Main IRQ handler.
 *
 * @param cdev    The ccw_device the interrupt is for.
 * @param intparm interruption parameter.
 * @param irb     interruption response block.
 */
static void
ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct channel *ch;
	struct net_device *dev;
	struct ctc_priv *priv;

	DBF_TEXT(trace, 5, __FUNCTION__);
	if (__ctc_check_irb_error(cdev, irb))
		return;

	/* Check for unsolicited interrupts. */
	if (!cdev->dev.driver_data) {
		ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
			    cdev->dev.bus_id, irb->scsw.cstat,
			    irb->scsw.dstat);
		return;
	}

	priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
		->dev.driver_data;

	/* Try to extract channel from driver data. */
	if (priv->channel[READ]->cdev == cdev)
		ch = priv->channel[READ];
	else if (priv->channel[WRITE]->cdev == cdev)
		ch = priv->channel[WRITE];
	else {
		ctc_pr_err("ctc: Can't determine channel for interrupt, "
			   "device %s\n", cdev->dev.bus_id);
		return;
	}

	dev = (struct net_device *) (ch->netdev);
	if (dev == NULL) {
		ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
			    cdev->dev.bus_id, ch);
		return;
	}

#ifdef DEBUG
	ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
		     dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
#endif

	/* Copy interruption response block. */
	memcpy(ch->irb, irb, sizeof(struct irb));

	/* Check for good subchannel return code, otherwise error message */
	if (ch->irb->scsw.cstat) {
		fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
		ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
			    dev->name, ch->id, ch->irb->scsw.cstat,
			    ch->irb->scsw.dstat);
		return;
	}

	/* Check the reason-code of a unit check */
	if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
		ccw_unit_check(ch, ch->irb->ecw[0]);
		return;
	}
	if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
		if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
			fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
		else
			fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
		return;
	}
	if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
		fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
		return;
	}
	if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
	    (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
	    (ch->irb->scsw.stctl ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
		fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
	else
		fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
}
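
/*
 * Note (explanatory comment, not part of the original sources): the
 * handler above classifies each interrupt in a fixed order -- subchannel
 * check, unit check, busy/attention, attention, then final status -- and
 * maps it to a single CH_EVENT_* for the channel statemachine, so the
 * reaction policy lives entirely in ch_fsm rather than in the interrupt
 * handler.
 */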

/**
 * Actions for interface - statemachine.
 *****************************************************************************/

/**
 * Startup channels by sending CH_EVENT_START to each channel.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from struct net_device * upon call.
 */
static void
dev_action_start(fsm_instance * fi, int event, void *arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct ctc_priv *privptr = dev->priv;
	int direction;

	DBF_TEXT(setup, 3, __FUNCTION__);
	fsm_deltimer(&privptr->restart_timer);
	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
	for (direction = READ; direction <= WRITE; direction++) {
		struct channel *ch = privptr->channel[direction];
		fsm_event(ch->fsm, CH_EVENT_START, ch);
	}
}
1988 * Shut down the channels by sending CH_EVENT_STOP to each channel.
1990 * @param fi An instance of an interface statemachine.
1991 * @param event The event that just happened.
1992 * @param arg Generic pointer, cast from struct net_device * upon call.
1994 static void
1995 dev_action_stop(fsm_instance * fi, int event, void *arg)
1997 struct net_device *dev = (struct net_device *) arg;
1998 struct ctc_priv *privptr = dev->priv;
1999 int direction;
2001 DBF_TEXT(trace, 3, __FUNCTION__);
2002 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2003 for (direction = READ; direction <= WRITE; direction++) {
2004 struct channel *ch = privptr->channel[direction];
2005 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2008 static void
2009 dev_action_restart(fsm_instance *fi, int event, void *arg)
2011 struct net_device *dev = (struct net_device *)arg;
2012 struct ctc_priv *privptr = dev->priv;
2014 DBF_TEXT(trace, 3, __FUNCTION__);
2015 ctc_pr_debug("%s: Restarting\n", dev->name);
2016 dev_action_stop(fi, event, arg);
2017 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2018 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2019 DEV_EVENT_START, dev);
2023 * Called from channel statemachine
2024 * when a channel is up and running.
2026 * @param fi An instance of an interface statemachine.
2027 * @param event The event that just happened.
2028 * @param arg Generic pointer, cast from struct net_device * upon call.
2030 static void
2031 dev_action_chup(fsm_instance * fi, int event, void *arg)
2033 struct net_device *dev = (struct net_device *) arg;
2035 DBF_TEXT(trace, 3, __FUNCTION__);
2036 switch (fsm_getstate(fi)) {
2037 case DEV_STATE_STARTWAIT_RXTX:
2038 if (event == DEV_EVENT_RXUP)
2039 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2040 else
2041 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2042 break;
2043 case DEV_STATE_STARTWAIT_RX:
2044 if (event == DEV_EVENT_RXUP) {
2045 fsm_newstate(fi, DEV_STATE_RUNNING);
2046 ctc_pr_info("%s: connected with remote side\n",
2047 dev->name);
2048 ctc_clear_busy(dev);
2050 break;
2051 case DEV_STATE_STARTWAIT_TX:
2052 if (event == DEV_EVENT_TXUP) {
2053 fsm_newstate(fi, DEV_STATE_RUNNING);
2054 ctc_pr_info("%s: connected with remote side\n",
2055 dev->name);
2056 ctc_clear_busy(dev);
2058 break;
2059 case DEV_STATE_STOPWAIT_TX:
2060 if (event == DEV_EVENT_RXUP)
2061 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2062 break;
2063 case DEV_STATE_STOPWAIT_RX:
2064 if (event == DEV_EVENT_TXUP)
2065 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2066 break;
2071 * Called from channel statemachine
2072 * when a channel has been shut down.
2074 * @param fi An instance of an interface statemachine.
2075 * @param event The event that just happened.
2076 * @param arg Generic pointer, cast from struct net_device * upon call.
2078 static void
2079 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2082 DBF_TEXT(trace, 3, __FUNCTION__);
2083 switch (fsm_getstate(fi)) {
2084 case DEV_STATE_RUNNING:
2085 if (event == DEV_EVENT_TXDOWN)
2086 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2087 else
2088 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2089 break;
2090 case DEV_STATE_STARTWAIT_RX:
2091 if (event == DEV_EVENT_TXDOWN)
2092 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2093 break;
2094 case DEV_STATE_STARTWAIT_TX:
2095 if (event == DEV_EVENT_RXDOWN)
2096 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2097 break;
2098 case DEV_STATE_STOPWAIT_RXTX:
2099 if (event == DEV_EVENT_TXDOWN)
2100 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2101 else
2102 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2103 break;
2104 case DEV_STATE_STOPWAIT_RX:
2105 if (event == DEV_EVENT_RXDOWN)
2106 fsm_newstate(fi, DEV_STATE_STOPPED);
2107 break;
2108 case DEV_STATE_STOPWAIT_TX:
2109 if (event == DEV_EVENT_TXDOWN)
2110 fsm_newstate(fi, DEV_STATE_STOPPED);
2111 break;
2115 static const fsm_node dev_fsm[] = {
2116 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2118 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2119 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2120 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2121 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2123 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2124 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2125 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2126 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2127 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2129 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2130 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2131 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2132 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2133 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2135 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2136 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2137 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2138 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2139 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2140 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2142 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2143 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2144 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2145 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2146 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2148 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2149 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2150 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2151 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2152 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2154 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2155 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2156 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2157 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2158 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2159 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2162 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
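/*
 * Illustrative sketch (not part of the driver): assuming both channels
 * come up cleanly, a normal start walks the table above as follows:
 *
 *   DEV_STATE_STOPPED        --DEV_EVENT_START--> dev_action_start()
 *                                                 -> DEV_STATE_STARTWAIT_RXTX
 *   DEV_STATE_STARTWAIT_RXTX --DEV_EVENT_RXUP---> dev_action_chup()
 *                                                 -> DEV_STATE_STARTWAIT_TX
 *   DEV_STATE_STARTWAIT_TX   --DEV_EVENT_TXUP---> dev_action_chup()
 *                                                 -> DEV_STATE_RUNNING
 *                                                    ("connected with remote side")
 */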
2165 * Transmit a packet.
2166 * This is a helper function for ctc_tx().
2168 * @param ch Channel to be used for sending.
2169 * @param skb Pointer to struct sk_buff of packet to send.
2170 * The link-level header has already been set up
2171 * by ctc_tx().
2173 * @return 0 on success, -ERRNO on failure.
2175 static int
2176 transmit_skb(struct channel *ch, struct sk_buff *skb)
2178 unsigned long saveflags;
2179 struct ll_header header;
2180 int rc = 0;
2182 DBF_TEXT(trace, 5, __FUNCTION__);
2183 /* we need to acquire the lock for testing the state
2184 * otherwise we can have an IRQ changing the state to
2185 * TXIDLE after the test but before acquiring the lock.
2187 spin_lock_irqsave(&ch->collect_lock, saveflags);
2188 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2189 int l = skb->len + LL_HEADER_LENGTH;
2191 if (ch->collect_len + l > ch->max_bufsize - 2) {
2192 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2193 return -EBUSY;
2194 } else {
2195 atomic_inc(&skb->users);
2196 header.length = l;
2197 header.type = skb->protocol;
2198 header.unused = 0;
2199 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2200 LL_HEADER_LENGTH);
2201 skb_queue_tail(&ch->collect_queue, skb);
2202 ch->collect_len += l;
2204 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2205 } else {
2206 __u16 block_len;
2207 int ccw_idx;
2208 struct sk_buff *nskb;
2209 unsigned long hi;
2210 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2212 * Protect skb against being freed by upper
2213 * layers.
2215 atomic_inc(&skb->users);
2216 ch->prof.txlen += skb->len;
2217 header.length = skb->len + LL_HEADER_LENGTH;
2218 header.type = skb->protocol;
2219 header.unused = 0;
2220 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2221 LL_HEADER_LENGTH);
2222 block_len = skb->len + 2;
2223 *((__u16 *) skb_push(skb, 2)) = block_len;
2226 * IDAL support in CTC is broken, so we have to
2227 * take care of skbs above 2 GB ourselves.
2229 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
2230 if (hi) {
2231 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2232 if (!nskb) {
2233 atomic_dec(&skb->users);
2234 skb_pull(skb, LL_HEADER_LENGTH + 2);
2235 ctc_clear_busy(ch->netdev);
2236 return -ENOMEM;
2237 } else {
2238 memcpy(skb_put(nskb, skb->len),
2239 skb->data, skb->len);
2240 atomic_inc(&nskb->users);
2241 atomic_dec(&skb->users);
2242 dev_kfree_skb_irq(skb);
2243 skb = nskb;
2247 ch->ccw[4].count = block_len;
2248 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2250 * IDAL allocation failed; retry by copying into
2251 * trans_skb, which usually has a pre-allocated
2252 * IDAL.
2254 if (ctc_checkalloc_buffer(ch, 1)) {
2256 * Remove our header. It gets added
2257 * again on retransmit.
2259 atomic_dec(&skb->users);
2260 skb_pull(skb, LL_HEADER_LENGTH + 2);
2261 ctc_clear_busy(ch->netdev);
2262 return -EBUSY;
2265 ch->trans_skb->tail = ch->trans_skb->data;
2266 ch->trans_skb->len = 0;
2267 ch->ccw[1].count = skb->len;
2268 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2269 skb->len);
2270 atomic_dec(&skb->users);
2271 dev_kfree_skb_irq(skb);
2272 ccw_idx = 0;
2273 } else {
2274 skb_queue_tail(&ch->io_queue, skb);
2275 ccw_idx = 3;
2277 ch->retry = 0;
2278 fsm_newstate(ch->fsm, CH_STATE_TX);
2279 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2280 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2281 ch->prof.send_stamp = xtime;
2282 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2283 (unsigned long) ch, 0xff, 0);
2284 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2285 if (ccw_idx == 3)
2286 ch->prof.doios_single++;
2287 if (rc != 0) {
2288 fsm_deltimer(&ch->timer);
2289 ccw_check_return_code(ch, rc, "single skb TX");
2290 if (ccw_idx == 3)
2291 skb_dequeue_tail(&ch->io_queue);
2293 * Remove our header. It gets added
2294 * again on retransmit.
2296 skb_pull(skb, LL_HEADER_LENGTH + 2);
2297 } else {
2298 if (ccw_idx == 0) {
2299 struct net_device *dev = ch->netdev;
2300 struct ctc_priv *privptr = dev->priv;
2301 privptr->stats.tx_packets++;
2302 privptr->stats.tx_bytes +=
2303 skb->len - LL_HEADER_LENGTH;
2308 ctc_clear_busy(ch->netdev);
2309 return rc;
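/*
 * Illustrative sketch (not part of the driver): the buffer that
 * transmit_skb() hands to ccw_device_start() is framed as
 *
 *   | 2-byte block length | ll_header | payload |
 *
 * where the block length is skb->len + 2 after the header has been pushed,
 * and ll_header carries length = payload + LL_HEADER_LENGTH, type =
 * skb->protocol and unused = 0.  Assuming struct ll_header from ctcmain.h
 * has exactly those three fields, a hypothetical helper that builds the
 * same frame into a flat buffer could look like this (sketch only):
 */
#if 0
static void
build_ctc_frame(__u8 *buf, const __u8 *payload, __u16 plen, __u16 proto)
{
	struct ll_header hdr = {
		.length = plen + LL_HEADER_LENGTH,
		.type	= proto,	/* skb->protocol in the real path */
		.unused = 0,
	};
	__u16 block_len = plen + LL_HEADER_LENGTH + 2;

	memcpy(buf, &block_len, 2);			/* block length first */
	memcpy(buf + 2, &hdr, LL_HEADER_LENGTH);	/* then the LL header */
	memcpy(buf + 2 + LL_HEADER_LENGTH, payload, plen);
}
#endif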
2313 * Interface API for upper network layers
2314 *****************************************************************************/
2317 * Open an interface.
2318 * Called from generic network layer when ifconfig up is run.
2320 * @param dev Pointer to interface struct.
2322 * @return Always 0 (this function never fails).
2324 static int
2325 ctc_open(struct net_device * dev)
2327 DBF_TEXT(trace, 5, __FUNCTION__);
2328 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2329 return 0;
2333 * Close an interface.
2334 * Called from generic network layer when ifconfig down is run.
2336 * @param dev Pointer to interface struct.
2338 * @return Always 0 (this function never fails).
2340 static int
2341 ctc_close(struct net_device * dev)
2343 DBF_TEXT(trace, 5, __FUNCTION__);
2344 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2345 return 0;
2349 * Start transmission of a packet.
2350 * Called from generic network device layer.
2352 * @param skb Pointer to buffer containing the packet.
2353 * @param dev Pointer to interface struct.
2355 * @return 0 if packet consumed, !0 if packet rejected.
2356 * Note: If we return !0, then the packet is freed by
2357 * the generic network layer.
2359 static int
2360 ctc_tx(struct sk_buff *skb, struct net_device * dev)
2362 int rc = 0;
2363 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2365 DBF_TEXT(trace, 5, __FUNCTION__);
2367 * Some sanity checks ...
2369 if (skb == NULL) {
2370 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2371 privptr->stats.tx_dropped++;
2372 return 0;
2374 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2375 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2376 dev->name, LL_HEADER_LENGTH + 2);
2377 dev_kfree_skb(skb);
2378 privptr->stats.tx_dropped++;
2379 return 0;
2383 * If channels are not running, try to restart them
2384 * and throw away packet.
2386 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2387 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2388 dev_kfree_skb(skb);
2389 privptr->stats.tx_dropped++;
2390 privptr->stats.tx_errors++;
2391 privptr->stats.tx_carrier_errors++;
2392 return 0;
2395 if (ctc_test_and_set_busy(dev))
2396 return -EBUSY;
2398 dev->trans_start = jiffies;
2399 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2400 rc = 1;
2401 return rc;
2405 * Sets MTU of an interface.
2407 * @param dev Pointer to interface struct.
2408 * @param new_mtu The new MTU to use for this interface.
2410 * @return 0 on success, -EINVAL if MTU is out of valid range.
2411 * (valid range is 576 .. 65527). If VM is on the
2412 * remote side, maximum MTU is 32760, however this is
2413 * <em>not</em> checked here.
2415 static int
2416 ctc_change_mtu(struct net_device * dev, int new_mtu)
2418 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2420 DBF_TEXT(trace, 3, __FUNCTION__);
2421 if ((new_mtu < 576) || (new_mtu > 65527) ||
2422 (new_mtu > (privptr->channel[READ]->max_bufsize -
2423 LL_HEADER_LENGTH - 2)))
2424 return -EINVAL;
2425 dev->mtu = new_mtu;
2426 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2427 return 0;
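/*
 * Worked example (assumed constants, see ctcmain.h): with the default
 * buffer size CTC_BUFSIZE_DEFAULT = 32768 and LL_HEADER_LENGTH = 6, the
 * largest MTU that passes the max_bufsize check above is
 * 32768 - 6 - 2 = 32760, which matches the VM limit mentioned in the
 * comment on ctc_change_mtu().
 */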
2431 * Returns interface statistics of a device.
2433 * @param dev Pointer to interface struct.
2435 * @return Pointer to stats struct of this interface.
2437 static struct net_device_stats *
2438 ctc_stats(struct net_device * dev)
2440 return &((struct ctc_priv *) dev->priv)->stats;
2444 * sysfs attributes
2447 static ssize_t
2448 buffer_show(struct device *dev, struct device_attribute *attr, char *buf)
2450 struct ctc_priv *priv;
2452 priv = dev->driver_data;
2453 if (!priv)
2454 return -ENODEV;
2455 return sprintf(buf, "%d\n",
2456 priv->buffer_size);
2459 static ssize_t
2460 buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2462 struct ctc_priv *priv;
2463 struct net_device *ndev;
2464 int bs1;
2465 char buffer[16];
2467 DBF_TEXT(trace, 3, __FUNCTION__);
2468 DBF_TEXT(trace, 3, buf);
2469 priv = dev->driver_data;
2470 if (!priv) {
2471 DBF_TEXT(trace, 3, "bfnopriv");
2472 return -ENODEV;
2475 sscanf(buf, "%u", &bs1);
2476 if (bs1 > CTC_BUFSIZE_LIMIT)
2477 goto einval;
2478 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2479 goto einval;
2480 priv->buffer_size = bs1; // just to overwrite the default
2482 ndev = priv->channel[READ]->netdev;
2483 if (!ndev) {
2484 DBF_TEXT(trace, 3, "bfnondev");
2485 return -ENODEV;
2488 if ((ndev->flags & IFF_RUNNING) &&
2489 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2490 goto einval;
2492 priv->channel[READ]->max_bufsize = bs1;
2493 priv->channel[WRITE]->max_bufsize = bs1;
2494 if (!(ndev->flags & IFF_RUNNING))
2495 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2496 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2497 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2499 sprintf(buffer, "%d", priv->buffer_size);
2500 DBF_TEXT(trace, 3, buffer);
2501 return count;
2503 einval:
2504 DBF_TEXT(trace, 3, "buff_err");
2505 return -EINVAL;
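/*
 * Usage sketch (paths and values are illustrative): the buffer size can
 * be tuned at run time through this sysfs attribute of the ccwgroup
 * device, e.g.
 *
 *   echo 32768 > /sys/bus/ccwgroup/devices/0.0.f000/buffer
 *
 * Values above CTC_BUFSIZE_LIMIT, below 576 + LL_HEADER_LENGTH + 2, or
 * (for a running interface) below MTU + LL_HEADER_LENGTH + 2 are rejected
 * with -EINVAL by buffer_write() above.
 */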
2508 static ssize_t
2509 loglevel_show(struct device *dev, struct device_attribute *attr, char *buf)
2511 return sprintf(buf, "%d\n", loglevel);
2514 static ssize_t
2515 loglevel_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2517 int ll1;
2519 DBF_TEXT(trace, 5, __FUNCTION__);
2520 sscanf(buf, "%i", &ll1);
2522 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2523 return -EINVAL;
2524 loglevel = ll1;
2525 return count;
2528 static void
2529 ctc_print_statistics(struct ctc_priv *priv)
2531 char *sbuf;
2532 char *p;
2534 DBF_TEXT(trace, 4, __FUNCTION__);
2535 if (!priv)
2536 return;
2537 sbuf = kmalloc(2048, GFP_KERNEL);
2538 if (sbuf == NULL)
2539 return;
2540 p = sbuf;
2542 p += sprintf(p, " Device FSM state: %s\n",
2543 fsm_getstate_str(priv->fsm));
2544 p += sprintf(p, " RX channel FSM state: %s\n",
2545 fsm_getstate_str(priv->channel[READ]->fsm));
2546 p += sprintf(p, " TX channel FSM state: %s\n",
2547 fsm_getstate_str(priv->channel[WRITE]->fsm));
2548 p += sprintf(p, " Max. TX buffer used: %ld\n",
2549 priv->channel[WRITE]->prof.maxmulti);
2550 p += sprintf(p, " Max. chained SKBs: %ld\n",
2551 priv->channel[WRITE]->prof.maxcqueue);
2552 p += sprintf(p, " TX single write ops: %ld\n",
2553 priv->channel[WRITE]->prof.doios_single);
2554 p += sprintf(p, " TX multi write ops: %ld\n",
2555 priv->channel[WRITE]->prof.doios_multi);
2556 p += sprintf(p, " Net bytes written: %ld\n",
2557 priv->channel[WRITE]->prof.txlen);
2558 p += sprintf(p, " Max. TX IO-time: %ld\n",
2559 priv->channel[WRITE]->prof.tx_time);
2561 ctc_pr_debug("Statistics for %s:\n%s",
2562 priv->channel[WRITE]->netdev->name, sbuf);
2563 kfree(sbuf);
2564 return;
2567 static ssize_t
2568 stats_show(struct device *dev, struct device_attribute *attr, char *buf)
2570 struct ctc_priv *priv = dev->driver_data;
2571 if (!priv)
2572 return -ENODEV;
2573 ctc_print_statistics(priv);
2574 return sprintf(buf, "0\n");
2577 static ssize_t
2578 stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2580 struct ctc_priv *priv = dev->driver_data;
2581 if (!priv)
2582 return -ENODEV;
2583 /* Reset statistics */
2584 memset(&priv->channel[WRITE]->prof, 0,
2585 sizeof(priv->channel[WRITE]->prof));
2586 return count;
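/*
 * Usage sketch (paths are illustrative): reading the "stats" attribute
 * dumps the TX profiling counters to the kernel log via
 * ctc_print_statistics(), which logs at debug level -- the read itself
 * only returns "0" -- and any write to the attribute resets the counters,
 * e.g.
 *
 *   cat /sys/bus/ccwgroup/devices/0.0.f000/stats
 *   echo 1 > /sys/bus/ccwgroup/devices/0.0.f000/stats
 */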
2589 static void
2590 ctc_netdev_unregister(struct net_device * dev)
2592 struct ctc_priv *privptr;
2594 if (!dev)
2595 return;
2596 privptr = (struct ctc_priv *) dev->priv;
2597 unregister_netdev(dev);
2600 static int
2601 ctc_netdev_register(struct net_device * dev)
2603 return register_netdev(dev);
2606 static void
2607 ctc_free_netdevice(struct net_device * dev, int free_dev)
2609 struct ctc_priv *privptr;
2610 if (!dev)
2611 return;
2612 privptr = dev->priv;
2613 if (privptr) {
2614 if (privptr->fsm)
2615 kfree_fsm(privptr->fsm);
2616 kfree(privptr);
2618 #ifdef MODULE
2619 if (free_dev)
2620 free_netdev(dev);
2621 #endif
2624 static ssize_t
2625 ctc_proto_show(struct device *dev, struct device_attribute *attr, char *buf)
2627 struct ctc_priv *priv;
2629 priv = dev->driver_data;
2630 if (!priv)
2631 return -ENODEV;
2633 return sprintf(buf, "%d\n", priv->protocol);
2636 static ssize_t
2637 ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2639 struct ctc_priv *priv;
2640 int value;
2642 DBF_TEXT(trace, 3, __FUNCTION__);
2643 pr_debug("%s() called\n", __FUNCTION__);
2645 priv = dev->driver_data;
2646 if (!priv)
2647 return -ENODEV;
2648 sscanf(buf, "%u", &value);
2649 if (!((value == CTC_PROTO_S390) ||
2650 (value == CTC_PROTO_LINUX) ||
2651 (value == CTC_PROTO_OS390)))
2652 return -EINVAL;
2653 priv->protocol = value;
2655 return count;
2658 static ssize_t
2659 ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2661 struct ccwgroup_device *cgdev;
2663 cgdev = to_ccwgroupdev(dev);
2664 if (!cgdev)
2665 return -ENODEV;
2667 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2670 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2671 static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2672 static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2674 static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2675 static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
2677 static struct attribute *ctc_attr[] = {
2678 &dev_attr_protocol.attr,
2679 &dev_attr_type.attr,
2680 &dev_attr_buffer.attr,
2681 NULL,
2684 static struct attribute_group ctc_attr_group = {
2685 .attrs = ctc_attr,
2688 static int
2689 ctc_add_attributes(struct device *dev)
2691 int rc;
2693 rc = device_create_file(dev, &dev_attr_loglevel);
2694 if (rc)
2695 goto out;
2696 rc = device_create_file(dev, &dev_attr_stats);
2697 if (!rc)
2698 goto out;
2699 device_remove_file(dev, &dev_attr_loglevel);
2700 out:
2701 return rc;
2704 static void
2705 ctc_remove_attributes(struct device *dev)
2707 device_remove_file(dev, &dev_attr_stats);
2708 device_remove_file(dev, &dev_attr_loglevel);
2711 static int
2712 ctc_add_files(struct device *dev)
2714 pr_debug("%s() called\n", __FUNCTION__);
2716 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
2719 static void
2720 ctc_remove_files(struct device *dev)
2722 pr_debug("%s() called\n", __FUNCTION__);
2724 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
2728 * Add ctc specific attributes.
2729 * Add ctc private data.
2731 * @param cgdev pointer to ccwgroup_device just added
2733 * @returns 0 on success, !0 on failure.
2735 static int
2736 ctc_probe_device(struct ccwgroup_device *cgdev)
2738 struct ctc_priv *priv;
2739 int rc;
2740 char buffer[16];
2742 pr_debug("%s() called\n", __FUNCTION__);
2743 DBF_TEXT(setup, 3, __FUNCTION__);
2745 if (!get_device(&cgdev->dev))
2746 return -ENODEV;
2748 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
2749 if (!priv) {
2750 ctc_pr_err("%s: Out of memory\n", __func__);
2751 put_device(&cgdev->dev);
2752 return -ENOMEM;
2755 memset(priv, 0, sizeof (struct ctc_priv));
2756 rc = ctc_add_files(&cgdev->dev);
2757 if (rc) {
2758 kfree(priv);
2759 put_device(&cgdev->dev);
2760 return rc;
2762 priv->buffer_size = CTC_BUFSIZE_DEFAULT;
2763 cgdev->cdev[0]->handler = ctc_irq_handler;
2764 cgdev->cdev[1]->handler = ctc_irq_handler;
2765 cgdev->dev.driver_data = priv;
2767 sprintf(buffer, "%p", priv);
2768 DBF_TEXT(data, 3, buffer);
2770 sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
2771 DBF_TEXT(data, 3, buffer);
2773 sprintf(buffer, "%p", &channels);
2774 DBF_TEXT(data, 3, buffer);
2776 sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
2777 DBF_TEXT(data, 3, buffer);
2779 return 0;
2783 * Initialize all fields of the net device except the name and the
2784 * channel structs.
2786 static struct net_device *
2787 ctc_init_netdevice(struct net_device * dev, int alloc_device,
2788 struct ctc_priv *privptr)
2790 if (!privptr)
2791 return NULL;
2793 DBF_TEXT(setup, 3, __FUNCTION__);
2795 if (alloc_device) {
2796 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
2797 if (!dev)
2798 return NULL;
2799 memset(dev, 0, sizeof (struct net_device));
2802 dev->priv = privptr;
2803 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2804 dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
2805 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2806 if (privptr->fsm == NULL) {
2807 if (alloc_device)
2808 kfree(dev);
2809 return NULL;
2811 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2812 fsm_settimer(privptr->fsm, &privptr->restart_timer);
2813 if (dev->mtu == 0)
2814 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2815 dev->hard_start_xmit = ctc_tx;
2816 dev->open = ctc_open;
2817 dev->stop = ctc_close;
2818 dev->get_stats = ctc_stats;
2819 dev->change_mtu = ctc_change_mtu;
2820 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2821 dev->addr_len = 0;
2822 dev->type = ARPHRD_SLIP;
2823 dev->tx_queue_len = 100;
2824 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2825 SET_MODULE_OWNER(dev);
2826 return dev;
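/*
 * Call-flow sketch (not part of the driver): with the callbacks wired up
 * above, bringing the interface up from user space only kicks the device
 * statemachine; the channels are then started asynchronously:
 *
 *   "ifconfig ctc0 up"
 *     -> ctc_open(dev)
 *        -> fsm_event(priv->fsm, DEV_EVENT_START, dev)
 *           -> dev_action_start(): DEV_STATE_STARTWAIT_RXTX,
 *              CH_EVENT_START to both the READ and the WRITE channel
 */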
2832 * Set up an interface.
2834 * @param cgdev Device to be setup.
2836 * @returns 0 on success, !0 on failure.
2838 static int
2839 ctc_new_device(struct ccwgroup_device *cgdev)
2841 char read_id[CTC_ID_SIZE];
2842 char write_id[CTC_ID_SIZE];
2843 int direction;
2844 enum channel_types type;
2845 struct ctc_priv *privptr;
2846 struct net_device *dev;
2847 int ret;
2848 char buffer[16];
2850 pr_debug("%s() called\n", __FUNCTION__);
2851 DBF_TEXT(setup, 3, __FUNCTION__);
2853 privptr = cgdev->dev.driver_data;
2854 if (!privptr)
2855 return -ENODEV;
2857 sprintf(buffer, "%d", privptr->buffer_size);
2858 DBF_TEXT(setup, 3, buffer);
2860 type = get_channel_type(&cgdev->cdev[0]->id);
2862 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
2863 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
2865 if (add_channel(cgdev->cdev[0], type))
2866 return -ENOMEM;
2867 if (add_channel(cgdev->cdev[1], type))
2868 return -ENOMEM;
2870 ret = ccw_device_set_online(cgdev->cdev[0]);
2871 if (ret != 0) {
2872 printk(KERN_WARNING
2873 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
2876 ret = ccw_device_set_online(cgdev->cdev[1]);
2877 if (ret != 0) {
2878 printk(KERN_WARNING
2879 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
2882 dev = ctc_init_netdevice(NULL, 1, privptr);
2884 if (!dev) {
2885 ctc_pr_warn("ctc_init_netdevice failed\n");
2886 goto out;
2889 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
2891 for (direction = READ; direction <= WRITE; direction++) {
2892 privptr->channel[direction] =
2893 channel_get(type, direction == READ ? read_id : write_id,
2894 direction);
2895 if (privptr->channel[direction] == NULL) {
2896 if (direction == WRITE)
2897 channel_free(privptr->channel[READ]);
2899 ctc_free_netdevice(dev, 1);
2900 goto out;
2902 privptr->channel[direction]->netdev = dev;
2903 privptr->channel[direction]->protocol = privptr->protocol;
2904 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
2906 /* sysfs magic */
2907 SET_NETDEV_DEV(dev, &cgdev->dev);
2909 if (ctc_netdev_register(dev) != 0) {
2910 ctc_free_netdevice(dev, 1);
2911 goto out;
2914 if (ctc_add_attributes(&cgdev->dev)) {
2915 ctc_netdev_unregister(dev);
2916 dev->priv = NULL;
2917 ctc_free_netdevice(dev, 1);
2918 goto out;
2921 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2923 print_banner();
2925 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
2926 dev->name, privptr->channel[READ]->id,
2927 privptr->channel[WRITE]->id, privptr->protocol);
2929 return 0;
2930 out:
2931 ccw_device_set_offline(cgdev->cdev[1]);
2932 ccw_device_set_offline(cgdev->cdev[0]);
2934 return -ENODEV;
2938 * Shut down an interface.
2940 * @param cgdev Device to be shut down.
2942 * @returns 0 on success, !0 on failure.
2944 static int
2945 ctc_shutdown_device(struct ccwgroup_device *cgdev)
2947 struct ctc_priv *priv;
2948 struct net_device *ndev;
2950 DBF_TEXT(setup, 3, __FUNCTION__);
2951 pr_debug("%s() called\n", __FUNCTION__);
2954 priv = cgdev->dev.driver_data;
2955 ndev = NULL;
2956 if (!priv)
2957 return -ENODEV;
2959 if (priv->channel[READ]) {
2960 ndev = priv->channel[READ]->netdev;
2962 /* Close the device */
2963 ctc_close(ndev);
2964 ndev->flags &= ~IFF_RUNNING;
2966 ctc_remove_attributes(&cgdev->dev);
2968 channel_free(priv->channel[READ]);
2970 if (priv->channel[WRITE])
2971 channel_free(priv->channel[WRITE]);
2973 if (ndev) {
2974 ctc_netdev_unregister(ndev);
2975 ndev->priv = NULL;
2976 ctc_free_netdevice(ndev, 1);
2979 if (priv->fsm)
2980 kfree_fsm(priv->fsm);
2982 ccw_device_set_offline(cgdev->cdev[1]);
2983 ccw_device_set_offline(cgdev->cdev[0]);
2985 if (priv->channel[READ])
2986 channel_remove(priv->channel[READ]);
2987 if (priv->channel[WRITE])
2988 channel_remove(priv->channel[WRITE]);
2989 priv->channel[READ] = priv->channel[WRITE] = NULL;
2991 return 0;
2995 static void
2996 ctc_remove_device(struct ccwgroup_device *cgdev)
2998 struct ctc_priv *priv;
3000 pr_debug("%s() called\n", __FUNCTION__);
3001 DBF_TEXT(setup, 3, __FUNCTION__);
3003 priv = cgdev->dev.driver_data;
3004 if (!priv)
3005 return;
3006 if (cgdev->state == CCWGROUP_ONLINE)
3007 ctc_shutdown_device(cgdev);
3008 ctc_remove_files(&cgdev->dev);
3009 cgdev->dev.driver_data = NULL;
3010 kfree(priv);
3011 put_device(&cgdev->dev);
3014 static struct ccwgroup_driver ctc_group_driver = {
3015 .owner = THIS_MODULE,
3016 .name = "ctc",
3017 .max_slaves = 2,
3018 .driver_id = 0xC3E3C3,
3019 .probe = ctc_probe_device,
3020 .remove = ctc_remove_device,
3021 .set_online = ctc_new_device,
3022 .set_offline = ctc_shutdown_device,
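/*
 * Usage sketch (device numbers are illustrative): since the driver is
 * registered as a ccwgroup discipline named "ctc", an interface is
 * typically created from user space by grouping a read and a write
 * subchannel and setting the group online, e.g.
 *
 *   echo 0.0.f000,0.0.f001 > /sys/bus/ccwgroup/drivers/ctc/group
 *   echo 1 > /sys/bus/ccwgroup/devices/0.0.f000/online
 *
 * Setting the group online ends up in ctc_new_device(), setting it
 * offline in ctc_shutdown_device().
 */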
3026 * Module related routines
3027 *****************************************************************************/
3030 * Prepare to be unloaded. Free IRQs and release all resources.
3031 * This is called just before this module is unloaded. It is
3032 * <em>not</em> called if the usage count is !0, so we don't need to check
3033 * for that.
3035 static void __exit
3036 ctc_exit(void)
3038 DBF_TEXT(setup, 3, __FUNCTION__);
3039 unregister_cu3088_discipline(&ctc_group_driver);
3040 ctc_unregister_dbf_views();
3041 ctc_pr_info("CTC driver unloaded\n");
3045 * Initialize module.
3046 * This is called just after the module is loaded.
3048 * @return 0 on success, !0 on error.
3050 static int __init
3051 ctc_init(void)
3053 int ret = 0;
3055 loglevel = CTC_LOGLEVEL_DEFAULT;
3057 DBF_TEXT(setup, 3, __FUNCTION__);
3059 print_banner();
3061 ret = ctc_register_dbf_views();
3062 if (ret){
3063 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3064 return ret;
3066 ret = register_cu3088_discipline(&ctc_group_driver);
3067 if (ret) {
3068 ctc_unregister_dbf_views();
3070 return ret;
3073 module_init(ctc_init);
3074 module_exit(ctc_exit);
3076 /* --- This is the END my friend --- */