drivers/s390/net/ctcmain.c
1 /*
2 * CTC / ESCON network driver
4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
7 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8 * Peter Tiedemann (ptiedem@de.ibm.com)
9 * Driver Model stuff by : Cornelia Huck <huckc@de.ibm.com>
11 * Documentation used:
12 * - Principles of Operation (IBM doc#: SA22-7201-06)
13 * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
14 * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
15 * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
16 * - ESCON I/O Interface (IBM doc#: SA22-7202-029)
18 * and the source of the original CTC driver by:
19 * Dieter Wellerdiek (wel@de.ibm.com)
20 * Martin Schwidefsky (schwidefsky@de.ibm.com)
21 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
22 * Jochen Röhrig (roehrig@de.ibm.com)
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by
26 * the Free Software Foundation; either version 2, or (at your option)
27 * any later version.
29 * This program is distributed in the hope that it will be useful,
30 * but WITHOUT ANY WARRANTY; without even the implied warranty of
31 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
32 * GNU General Public License for more details.
34 * You should have received a copy of the GNU General Public License
35 * along with this program; if not, write to the Free Software
36 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
39 #undef DEBUG
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/kernel.h>
43 #include <linux/slab.h>
44 #include <linux/errno.h>
45 #include <linux/types.h>
46 #include <linux/interrupt.h>
47 #include <linux/timer.h>
48 #include <linux/sched.h>
49 #include <linux/bitops.h>
51 #include <linux/signal.h>
52 #include <linux/string.h>
54 #include <linux/ip.h>
55 #include <linux/if_arp.h>
56 #include <linux/tcp.h>
57 #include <linux/skbuff.h>
58 #include <linux/ctype.h>
59 #include <net/dst.h>
61 #include <asm/io.h>
62 #include <asm/ccwdev.h>
63 #include <asm/ccwgroup.h>
64 #include <asm/uaccess.h>
66 #include <asm/idals.h>
68 #include "ctctty.h"
69 #include "fsm.h"
70 #include "cu3088.h"
72 #include "ctcdbug.h"
73 #include "ctcmain.h"
75 MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
76 MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
77 MODULE_LICENSE("GPL");
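/**
 * Overview (a sketch based on the code below): every CTC/ESCON network
 * interface is backed by a pair of CCW devices grouped by the cu3088
 * ccwgroup driver - one channel used in READ direction, one in WRITE
 * direction (priv->channel[READ] / priv->channel[WRITE]). Each channel
 * is driven by its own statemachine (ch_fsm), and a per-interface
 * statemachine (dev_states/dev_events) brings the interface to
 * DEV_STATE_RUNNING only after both directions have reported up.
 */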
78 /**
79 * States of the interface statemachine.
81 enum dev_states {
82 DEV_STATE_STOPPED,
83 DEV_STATE_STARTWAIT_RXTX,
84 DEV_STATE_STARTWAIT_RX,
85 DEV_STATE_STARTWAIT_TX,
86 DEV_STATE_STOPWAIT_RXTX,
87 DEV_STATE_STOPWAIT_RX,
88 DEV_STATE_STOPWAIT_TX,
89 DEV_STATE_RUNNING,
90 /**
91 * MUST always be the last element!!
93 CTC_NR_DEV_STATES
96 static const char *dev_state_names[] = {
97 "Stopped",
98 "StartWait RXTX",
99 "StartWait RX",
100 "StartWait TX",
101 "StopWait RXTX",
102 "StopWait RX",
103 "StopWait TX",
104 "Running",
108 * Events of the interface statemachine.
110 enum dev_events {
111 DEV_EVENT_START,
112 DEV_EVENT_STOP,
113 DEV_EVENT_RXUP,
114 DEV_EVENT_TXUP,
115 DEV_EVENT_RXDOWN,
116 DEV_EVENT_TXDOWN,
117 DEV_EVENT_RESTART,
119 * MUST always be the last element!!
121 CTC_NR_DEV_EVENTS
124 static const char *dev_event_names[] = {
125 "Start",
126 "Stop",
127 "RX up",
128 "TX up",
129 "RX down",
130 "TX down",
131 "Restart",
135 * Events of the channel statemachine
137 enum ch_events {
139 * Events, representing return code of
140 * I/O operations (ccw_device_start, ccw_device_halt et al.)
142 CH_EVENT_IO_SUCCESS,
143 CH_EVENT_IO_EBUSY,
144 CH_EVENT_IO_ENODEV,
145 CH_EVENT_IO_EIO,
146 CH_EVENT_IO_UNKNOWN,
148 CH_EVENT_ATTNBUSY,
149 CH_EVENT_ATTN,
150 CH_EVENT_BUSY,
153 * Events, representing unit-check
155 CH_EVENT_UC_RCRESET,
156 CH_EVENT_UC_RSRESET,
157 CH_EVENT_UC_TXTIMEOUT,
158 CH_EVENT_UC_TXPARITY,
159 CH_EVENT_UC_HWFAIL,
160 CH_EVENT_UC_RXPARITY,
161 CH_EVENT_UC_ZERO,
162 CH_EVENT_UC_UNKNOWN,
165 * Events, representing subchannel-check
167 CH_EVENT_SC_UNKNOWN,
170 * Events, representing machine checks
172 CH_EVENT_MC_FAIL,
173 CH_EVENT_MC_GOOD,
176 * Event, representing normal IRQ
178 CH_EVENT_IRQ,
179 CH_EVENT_FINSTAT,
182 * Event, representing timer expiry.
184 CH_EVENT_TIMER,
187 * Events, representing commands from upper levels.
189 CH_EVENT_START,
190 CH_EVENT_STOP,
193 * MUST always be the last element!!
195 NR_CH_EVENTS,
199 * States of the channel statemachine.
201 enum ch_states {
203 * Channel not assigned to any device,
204 * initial state, direction invalid
206 CH_STATE_IDLE,
209 * Channel assigned but not operating
211 CH_STATE_STOPPED,
212 CH_STATE_STARTWAIT,
213 CH_STATE_STARTRETRY,
214 CH_STATE_SETUPWAIT,
215 CH_STATE_RXINIT,
216 CH_STATE_TXINIT,
217 CH_STATE_RX,
218 CH_STATE_TX,
219 CH_STATE_RXIDLE,
220 CH_STATE_TXIDLE,
221 CH_STATE_RXERR,
222 CH_STATE_TXERR,
223 CH_STATE_TERM,
224 CH_STATE_DTERM,
225 CH_STATE_NOTOP,
228 * MUST always be the last element!!
230 NR_CH_STATES,
233 static int loglevel = CTC_LOGLEVEL_DEFAULT;
236 * Linked list of all detected channels.
238 static struct channel *channels = NULL;
241 * Print Banner.
243 static void
244 print_banner(void)
246 static int printed = 0;
248 if (printed)
249 return;
251 printk(KERN_INFO "CTC driver initialized\n");
252 printed = 1;
256 * Return type of a detected device.
258 static enum channel_types
259 get_channel_type(struct ccw_device_id *id)
261 enum channel_types type = (enum channel_types) id->driver_info;
263 if (type == channel_type_ficon)
264 type = channel_type_escon;
266 return type;
269 static const char *ch_event_names[] = {
270 "ccw_device success",
271 "ccw_device busy",
272 "ccw_device enodev",
273 "ccw_device ioerr",
274 "ccw_device unknown",
276 "Status ATTN & BUSY",
277 "Status ATTN",
278 "Status BUSY",
280 "Unit check remote reset",
281 "Unit check remote system reset",
282 "Unit check TX timeout",
283 "Unit check TX parity",
284 "Unit check Hardware failure",
285 "Unit check RX parity",
286 "Unit check ZERO",
287 "Unit check Unknown",
289 "SubChannel check Unknown",
291 "Machine check failure",
292 "Machine check operational",
294 "IRQ normal",
295 "IRQ final",
297 "Timer",
299 "Start",
300 "Stop",
303 static const char *ch_state_names[] = {
304 "Idle",
305 "Stopped",
306 "StartWait",
307 "StartRetry",
308 "SetupWait",
309 "RX init",
310 "TX init",
311 "RX",
312 "TX",
313 "RX idle",
314 "TX idle",
315 "RX error",
316 "TX error",
317 "Terminating",
318 "Restarting",
319 "Not operational",
322 #ifdef DEBUG
324 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
326 * @param skb The sk_buff to dump.
327 * @param offset Offset relative to skb-data, where to start the dump.
329 static void
330 ctc_dump_skb(struct sk_buff *skb, int offset)
332 unsigned char *p = skb->data;
333 __u16 bl;
334 struct ll_header *header;
335 int i;
337 if (!(loglevel & CTC_LOGLEVEL_DEBUG))
338 return;
339 p += offset;
340 bl = *((__u16 *) p);
341 p += 2;
342 header = (struct ll_header *) p;
343 p -= 2;
345 printk(KERN_DEBUG "dump:\n");
346 printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
348 printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
349 header->length);
350 printk(KERN_DEBUG "h->type=%04x\n", header->type);
351 printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
352 if (bl > 16)
353 bl = 16;
354 printk(KERN_DEBUG "data: ");
355 for (i = 0; i < bl; i++)
356 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
357 printk("\n");
359 #else
360 static inline void
361 ctc_dump_skb(struct sk_buff *skb, int offset)
364 #endif
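/**
 * Receive-block layout unpacked below (a sketch; field widths assume
 * the struct ll_header definition in ctcmain.h):
 *
 *   +-----------+---------------------------+---------+----
 *   | 2 bytes   | ll_header                 | payload | ...
 *   | block len | (length, type, unused)    |         |
 *   +-----------+---------------------------+---------+----
 *
 * The leading block length counts itself (the TX side stores
 * collect_len + 2, see ch_action_txdone), and each ll_header.length
 * counts the header plus its payload. Example, assuming
 * LL_HEADER_LENGTH == 6: one 100-byte IP packet travels as a 108-byte
 * block with block length 108 and ll_header.length 106.
 */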
367 * Unpack a just-received skb and hand it over to
368 * upper layers.
370 * @param ch The channel where this skb has been received.
371 * @param pskb The received skb.
373 static __inline__ void
374 ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
376 struct net_device *dev = ch->netdev;
377 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
378 __u16 len = *((__u16 *) pskb->data);
380 DBF_TEXT(trace, 4, __FUNCTION__);
381 skb_put(pskb, 2 + LL_HEADER_LENGTH);
382 skb_pull(pskb, 2);
383 pskb->dev = dev;
384 pskb->ip_summed = CHECKSUM_UNNECESSARY;
385 while (len > 0) {
386 struct sk_buff *skb;
387 struct ll_header *header = (struct ll_header *) pskb->data;
389 skb_pull(pskb, LL_HEADER_LENGTH);
390 if ((ch->protocol == CTC_PROTO_S390) &&
391 (header->type != ETH_P_IP)) {
393 #ifndef DEBUG
394 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
395 #endif
397 * Check the packet type only if we stick
398 * strictly to the S/390 (OS/390) protocol,
399 * which supports IP only. Otherwise allow
400 * any packet type.
402 ctc_pr_warn(
403 "%s Illegal packet type 0x%04x received, dropping\n",
404 dev->name, header->type);
405 ch->logflags |= LOG_FLAG_ILLEGALPKT;
406 #ifndef DEBUG
408 #endif
409 #ifdef DEBUG
410 ctc_dump_skb(pskb, -6);
411 #endif
412 privptr->stats.rx_dropped++;
413 privptr->stats.rx_frame_errors++;
414 return;
416 pskb->protocol = ntohs(header->type);
417 if (header->length <= LL_HEADER_LENGTH) {
418 #ifndef DEBUG
419 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
420 #endif
421 ctc_pr_warn(
422 "%s Illegal packet size %d "
423 "received (MTU=%d blocklen=%d), "
424 "dropping\n", dev->name, header->length,
425 dev->mtu, len);
426 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
427 #ifndef DEBUG
429 #endif
430 #ifdef DEBUG
431 ctc_dump_skb(pskb, -6);
432 #endif
433 privptr->stats.rx_dropped++;
434 privptr->stats.rx_length_errors++;
435 return;
437 header->length -= LL_HEADER_LENGTH;
438 len -= LL_HEADER_LENGTH;
439 if ((header->length > skb_tailroom(pskb)) ||
440 (header->length > len)) {
441 #ifndef DEBUG
442 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
443 #endif
444 ctc_pr_warn(
445 "%s Illegal packet size %d "
446 "(beyond the end of received data), "
447 "dropping\n", dev->name, header->length);
448 ch->logflags |= LOG_FLAG_OVERRUN;
449 #ifndef DEBUG
451 #endif
452 #ifdef DEBUG
453 ctc_dump_skb(pskb, -6);
454 #endif
455 privptr->stats.rx_dropped++;
456 privptr->stats.rx_length_errors++;
457 return;
459 skb_put(pskb, header->length);
460 pskb->mac.raw = pskb->data;
461 len -= header->length;
462 skb = dev_alloc_skb(pskb->len);
463 if (!skb) {
464 #ifndef DEBUG
465 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
466 #endif
467 ctc_pr_warn(
468 "%s Out of memory in ctc_unpack_skb\n",
469 dev->name);
470 ch->logflags |= LOG_FLAG_NOMEM;
471 #ifndef DEBUG
473 #endif
474 privptr->stats.rx_dropped++;
475 return;
477 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
478 skb->mac.raw = skb->data;
479 skb->dev = pskb->dev;
480 skb->protocol = pskb->protocol;
481 pskb->ip_summed = CHECKSUM_UNNECESSARY;
482 if (ch->protocol == CTC_PROTO_LINUX_TTY)
483 ctc_tty_netif_rx(skb);
484 else
485 netif_rx_ni(skb);
487 * Successful rx; reset logflags
489 ch->logflags = 0;
490 dev->last_rx = jiffies;
491 privptr->stats.rx_packets++;
492 privptr->stats.rx_bytes += skb->len;
493 if (len > 0) {
494 skb_pull(pskb, header->length);
495 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
496 #ifndef DEBUG
497 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
498 #endif
499 ctc_pr_warn(
500 "%s Overrun in ctc_unpack_skb\n",
501 dev->name);
502 ch->logflags |= LOG_FLAG_OVERRUN;
503 #ifndef DEBUG
505 #endif
506 return;
508 skb_put(pskb, LL_HEADER_LENGTH);
514 * Check the return code of a preceding ccw_device call (halt_IO etc.).
516 * @param ch The channel, the error belongs to.
517 * @param return_code The error code to inspect.
519 static void inline
520 ccw_check_return_code(struct channel *ch, int return_code, char *msg)
522 DBF_TEXT(trace, 5, __FUNCTION__);
523 switch (return_code) {
524 case 0:
525 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
526 break;
527 case -EBUSY:
528 ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
529 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
530 break;
531 case -ENODEV:
532 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
533 ch->id, msg);
534 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
535 break;
536 case -EIO:
537 ctc_pr_emerg("%s (%s): Status pending... \n",
538 ch->id, msg);
539 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
540 break;
541 default:
542 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
543 ch->id, msg, return_code);
544 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
549 * Check sense of a unit check.
551 * @param ch The channel, the sense code belongs to.
552 * @param sense The sense code to inspect.
554 static void inline
555 ccw_unit_check(struct channel *ch, unsigned char sense)
557 DBF_TEXT(trace, 5, __FUNCTION__);
558 if (sense & SNS0_INTERVENTION_REQ) {
559 if (sense & 0x01) {
560 if (ch->protocol != CTC_PROTO_LINUX_TTY)
561 ctc_pr_debug("%s: Interface disc. or Sel. reset "
562 "(remote)\n", ch->id);
563 fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
564 } else {
565 ctc_pr_debug("%s: System reset (remote)\n", ch->id);
566 fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
568 } else if (sense & SNS0_EQUIPMENT_CHECK) {
569 if (sense & SNS0_BUS_OUT_CHECK) {
570 ctc_pr_warn("%s: Hardware malfunction (remote)\n",
571 ch->id);
572 fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
573 } else {
574 ctc_pr_warn("%s: Read-data parity error (remote)\n",
575 ch->id);
576 fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
578 } else if (sense & SNS0_BUS_OUT_CHECK) {
579 if (sense & 0x04) {
580 ctc_pr_warn("%s: Data-streaming timeout\n", ch->id);
581 fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
582 } else {
583 ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
584 fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
586 } else if (sense & SNS0_CMD_REJECT) {
587 ctc_pr_warn("%s: Command reject\n", ch->id);
588 } else if (sense == 0) {
589 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
590 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
591 } else {
592 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
593 ch->id, sense);
594 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
598 static void
599 ctc_purge_skb_queue(struct sk_buff_head *q)
601 struct sk_buff *skb;
603 DBF_TEXT(trace, 5, __FUNCTION__);
605 while ((skb = skb_dequeue(q))) {
606 atomic_dec(&skb->users);
607 dev_kfree_skb_irq(skb);
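/**
 * (Re)allocate the fixed transfer buffer (trans_skb) of a channel and
 * point the data CCW at it - a sketch of what the helper below enforces:
 * the buffer is (re)built only if none exists yet or if
 * CHANNEL_FLAGS_BUFSIZE_CHANGED is set, it is allocated GFP_DMA so that
 * the channel subsystem can address it, and set_normalized_cda() stores
 * its address in ccw[1] (building an IDAL via asm/idals.h when the
 * buffer is not directly addressable), undone by clear_normalized_cda().
 */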
611 static __inline__ int
612 ctc_checkalloc_buffer(struct channel *ch, int warn)
614 DBF_TEXT(trace, 5, __FUNCTION__);
615 if ((ch->trans_skb == NULL) ||
616 (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
617 if (ch->trans_skb != NULL)
618 dev_kfree_skb(ch->trans_skb);
619 clear_normalized_cda(&ch->ccw[1]);
620 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
621 GFP_ATOMIC | GFP_DMA);
622 if (ch->trans_skb == NULL) {
623 if (warn)
624 ctc_pr_warn(
625 "%s: Couldn't alloc %s trans_skb\n",
626 ch->id,
627 (CHANNEL_DIRECTION(ch->flags) == READ) ?
628 "RX" : "TX");
629 return -ENOMEM;
631 ch->ccw[1].count = ch->max_bufsize;
632 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
633 dev_kfree_skb(ch->trans_skb);
634 ch->trans_skb = NULL;
635 if (warn)
636 ctc_pr_warn(
637 "%s: set_normalized_cda for %s "
638 "trans_skb failed, dropping packets\n",
639 ch->id,
640 (CHANNEL_DIRECTION(ch->flags) == READ) ?
641 "RX" : "TX");
642 return -ENOMEM;
644 ch->ccw[1].count = 0;
645 ch->trans_skb_data = ch->trans_skb->data;
646 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
648 return 0;
652 * Dummy NOP action for statemachines
654 static void
655 fsm_action_nop(fsm_instance * fi, int event, void *arg)
660 * Actions for the channel statemachines.
661 *****************************************************************************/
664 * Normal data has been sent. Free the corresponding
665 * skb (it's in io_queue), reset dev->tbusy and
666 * revert to idle state.
668 * @param fi An instance of a channel statemachine.
669 * @param event The event, just happened.
670 * @param arg Generic pointer, casted from channel * upon call.
672 static void
673 ch_action_txdone(fsm_instance * fi, int event, void *arg)
675 struct channel *ch = (struct channel *) arg;
676 struct net_device *dev = ch->netdev;
677 struct ctc_priv *privptr = dev->priv;
678 struct sk_buff *skb;
679 int first = 1;
680 int i;
681 unsigned long duration;
682 struct timespec done_stamp = xtime;
684 DBF_TEXT(trace, 4, __FUNCTION__);
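/*
 * Elapsed time since this channel's write CCW program was started
 * (ch->prof.send_stamp), in microseconds. The tv_nsec difference may
 * be negative, but the seconds term compensates: e.g. send at
 * 0s + 999,800,000 ns and completion at 1s + 200,000 ns gives
 * 1 * 1000000 + (200000 - 999800000) / 1000 = 400 usec.
 */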
686 duration =
687 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
688 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
689 if (duration > ch->prof.tx_time)
690 ch->prof.tx_time = duration;
692 if (ch->irb->scsw.count != 0)
693 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
694 dev->name, ch->irb->scsw.count);
695 fsm_deltimer(&ch->timer);
696 while ((skb = skb_dequeue(&ch->io_queue))) {
697 privptr->stats.tx_packets++;
698 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
699 if (first) {
700 privptr->stats.tx_bytes += 2;
701 first = 0;
703 atomic_dec(&skb->users);
704 dev_kfree_skb_irq(skb);
706 spin_lock(&ch->collect_lock);
707 clear_normalized_cda(&ch->ccw[4]);
708 if (ch->collect_len > 0) {
709 int rc;
711 if (ctc_checkalloc_buffer(ch, 1)) {
712 spin_unlock(&ch->collect_lock);
713 return;
715 ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
716 ch->trans_skb->len = 0;
717 if (ch->prof.maxmulti < (ch->collect_len + 2))
718 ch->prof.maxmulti = ch->collect_len + 2;
719 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
720 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
721 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
722 i = 0;
723 while ((skb = skb_dequeue(&ch->collect_queue))) {
724 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
725 skb->len);
726 privptr->stats.tx_packets++;
727 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
728 atomic_dec(&skb->users);
729 dev_kfree_skb_irq(skb);
730 i++;
732 ch->collect_len = 0;
733 spin_unlock(&ch->collect_lock);
734 ch->ccw[1].count = ch->trans_skb->len;
735 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
736 ch->prof.send_stamp = xtime;
737 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
738 (unsigned long) ch, 0xff, 0);
739 ch->prof.doios_multi++;
740 if (rc != 0) {
741 privptr->stats.tx_dropped += i;
742 privptr->stats.tx_errors += i;
743 fsm_deltimer(&ch->timer);
744 ccw_check_return_code(ch, rc, "chained TX");
746 } else {
747 spin_unlock(&ch->collect_lock);
748 fsm_newstate(fi, CH_STATE_TXIDLE);
750 ctc_clear_busy(dev);
754 * Initial data is sent.
755 * Notify device statemachine that we are up and
756 * running.
758 * @param fi An instance of a channel statemachine.
759 * @param event The event, just happened.
760 * @param arg Generic pointer, casted from channel * upon call.
762 static void
763 ch_action_txidle(fsm_instance * fi, int event, void *arg)
765 struct channel *ch = (struct channel *) arg;
767 DBF_TEXT(trace, 4, __FUNCTION__);
768 fsm_deltimer(&ch->timer);
769 fsm_newstate(fi, CH_STATE_TXIDLE);
770 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
771 ch->netdev);
775 * Got normal data, check it for sanity, queue it up, allocate a new
776 * buffer, trigger the bottom half, and initiate the next read.
778 * @param fi An instance of a channel statemachine.
779 * @param event The event, just happened.
780 * @param arg Generic pointer, casted from channel * upon call.
782 static void
783 ch_action_rx(fsm_instance * fi, int event, void *arg)
785 struct channel *ch = (struct channel *) arg;
786 struct net_device *dev = ch->netdev;
787 struct ctc_priv *privptr = dev->priv;
788 int len = ch->max_bufsize - ch->irb->scsw.count;
789 struct sk_buff *skb = ch->trans_skb;
790 __u16 block_len = *((__u16 *) skb->data);
791 int check_len;
792 int rc;
794 DBF_TEXT(trace, 4, __FUNCTION__);
795 fsm_deltimer(&ch->timer);
796 if (len < 8) {
797 ctc_pr_debug("%s: got packet with length %d < 8\n",
798 dev->name, len);
799 privptr->stats.rx_dropped++;
800 privptr->stats.rx_length_errors++;
801 goto again;
803 if (len > ch->max_bufsize) {
804 ctc_pr_debug("%s: got packet with length %d > %d\n",
805 dev->name, len, ch->max_bufsize);
806 privptr->stats.rx_dropped++;
807 privptr->stats.rx_length_errors++;
808 goto again;
812 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
814 switch (ch->protocol) {
815 case CTC_PROTO_S390:
816 case CTC_PROTO_OS390:
817 check_len = block_len + 2;
818 break;
819 default:
820 check_len = block_len;
821 break;
823 if ((len < block_len) || (len > check_len)) {
824 ctc_pr_debug("%s: got block length %d != rx length %d\n",
825 dev->name, block_len, len);
826 #ifdef DEBUG
827 ctc_dump_skb(skb, 0);
828 #endif
829 *((__u16 *) skb->data) = len;
830 privptr->stats.rx_dropped++;
831 privptr->stats.rx_length_errors++;
832 goto again;
834 block_len -= 2;
835 if (block_len > 0) {
836 *((__u16 *) skb->data) = block_len;
837 ctc_unpack_skb(ch, skb);
839 again:
840 skb->data = skb->tail = ch->trans_skb_data;
841 skb->len = 0;
842 if (ctc_checkalloc_buffer(ch, 1))
843 return;
844 ch->ccw[1].count = ch->max_bufsize;
845 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
846 if (rc != 0)
847 ccw_check_return_code(ch, rc, "normal RX");
850 static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
853 * Initialize connection by sending a __u16 of value 0.
855 * @param fi An instance of a channel statemachine.
856 * @param event The event, just happened.
857 * @param arg Generic pointer, casted from channel * upon call.
859 static void
860 ch_action_firstio(fsm_instance * fi, int event, void *arg)
862 struct channel *ch = (struct channel *) arg;
863 int rc;
865 DBF_TEXT(trace, 4, __FUNCTION__);
867 if (fsm_getstate(fi) == CH_STATE_TXIDLE)
868 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
869 fsm_deltimer(&ch->timer);
870 if (ctc_checkalloc_buffer(ch, 1))
871 return;
872 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
873 (ch->protocol == CTC_PROTO_OS390)) {
874 /* OS/390 resp. z/OS */
875 if (CHANNEL_DIRECTION(ch->flags) == READ) {
876 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
877 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
878 CH_EVENT_TIMER, ch);
879 ch_action_rxidle(fi, event, arg);
880 } else {
881 struct net_device *dev = ch->netdev;
882 fsm_newstate(fi, CH_STATE_TXIDLE);
883 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
884 DEV_EVENT_TXUP, dev);
886 return;
890 * Don't set up a timer for receiving the initial RX frame
891 * if in compatibility mode, since VM TCP delays the initial
892 * frame until it has some data to send.
894 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
895 (ch->protocol != CTC_PROTO_S390))
896 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
898 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
899 ch->ccw[1].count = 2; /* Transfer only length */
901 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
902 ? CH_STATE_RXINIT : CH_STATE_TXINIT);
903 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
904 if (rc != 0) {
905 fsm_deltimer(&ch->timer);
906 fsm_newstate(fi, CH_STATE_SETUPWAIT);
907 ccw_check_return_code(ch, rc, "init IO");
910 * If in compatibility mode, since we don't set up a timer, we
911 * also signal the RX channel up immediately. This enables us
912 * to send packets early, which in turn usually triggers a
913 * reply from VM TCP that brings up the RX channel to its
914 * final state.
916 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
917 (ch->protocol == CTC_PROTO_S390)) {
918 struct net_device *dev = ch->netdev;
919 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
920 dev);
925 * Got initial data, check it. If OK,
926 * notify device statemachine that we are up and
927 * running.
929 * @param fi An instance of a channel statemachine.
930 * @param event The event, just happened.
931 * @param arg Generic pointer, casted from channel * upon call.
933 static void
934 ch_action_rxidle(fsm_instance * fi, int event, void *arg)
936 struct channel *ch = (struct channel *) arg;
937 struct net_device *dev = ch->netdev;
938 __u16 buflen;
939 int rc;
941 DBF_TEXT(trace, 4, __FUNCTION__);
942 fsm_deltimer(&ch->timer);
943 buflen = *((__u16 *) ch->trans_skb->data);
944 #ifdef DEBUG
945 ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
946 #endif
947 if (buflen >= CTC_INITIAL_BLOCKLEN) {
948 if (ctc_checkalloc_buffer(ch, 1))
949 return;
950 ch->ccw[1].count = ch->max_bufsize;
951 fsm_newstate(fi, CH_STATE_RXIDLE);
952 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
953 (unsigned long) ch, 0xff, 0);
954 if (rc != 0) {
955 fsm_newstate(fi, CH_STATE_RXINIT);
956 ccw_check_return_code(ch, rc, "initial RX");
957 } else
958 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
959 DEV_EVENT_RXUP, dev);
960 } else {
961 ctc_pr_debug("%s: Initial RX count %d not %d\n",
962 dev->name, buflen, CTC_INITIAL_BLOCKLEN);
963 ch_action_firstio(fi, event, arg);
968 * Set channel into extended mode.
970 * @param fi An instance of a channel statemachine.
971 * @param event The event, just happened.
972 * @param arg Generic pointer, casted from channel * upon call.
974 static void
975 ch_action_setmode(fsm_instance * fi, int event, void *arg)
977 struct channel *ch = (struct channel *) arg;
978 int rc;
979 unsigned long saveflags;
981 DBF_TEXT(trace, 4, __FUNCTION__);
982 fsm_deltimer(&ch->timer);
983 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
984 fsm_newstate(fi, CH_STATE_SETUPWAIT);
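/*
 * ccw_device_start() must be called with the ccw device lock held.
 * When this action runs out of the interrupt handler the lock is
 * already held by the caller, so it is only taken here when we got
 * here via the timer (CH_EVENT_TIMER) - the same convention is used
 * in ch_action_haltio, ch_action_restart and ch_action_txretry below.
 */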
985 saveflags = 0; /* avoids compiler warning with
986 spin_unlock_irqrestore */
987 if (event == CH_EVENT_TIMER) // only for timer not yet locked
988 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
989 rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
990 if (event == CH_EVENT_TIMER)
991 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
992 if (rc != 0) {
993 fsm_deltimer(&ch->timer);
994 fsm_newstate(fi, CH_STATE_STARTWAIT);
995 ccw_check_return_code(ch, rc, "set Mode");
996 } else
997 ch->retry = 0;
1001 * Setup channel.
1003 * @param fi An instance of a channel statemachine.
1004 * @param event The event, just happened.
1005 * @param arg Generic pointer, casted from channel * upon call.
1007 static void
1008 ch_action_start(fsm_instance * fi, int event, void *arg)
1010 struct channel *ch = (struct channel *) arg;
1011 unsigned long saveflags;
1012 int rc;
1013 struct net_device *dev;
1015 DBF_TEXT(trace, 4, __FUNCTION__);
1016 if (ch == NULL) {
1017 ctc_pr_warn("ch_action_start ch=NULL\n");
1018 return;
1020 if (ch->netdev == NULL) {
1021 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1022 return;
1024 dev = ch->netdev;
1026 #ifdef DEBUG
1027 ctc_pr_debug("%s: %s channel start\n", dev->name,
1028 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1029 #endif
1031 if (ch->trans_skb != NULL) {
1032 clear_normalized_cda(&ch->ccw[1]);
1033 dev_kfree_skb(ch->trans_skb);
1034 ch->trans_skb = NULL;
1036 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1037 ch->ccw[1].cmd_code = CCW_CMD_READ;
1038 ch->ccw[1].flags = CCW_FLAG_SLI;
1039 ch->ccw[1].count = 0;
1040 } else {
1041 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1042 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1043 ch->ccw[1].count = 0;
1045 if (ctc_checkalloc_buffer(ch, 0)) {
1046 ctc_pr_notice(
1047 "%s: Could not allocate %s trans_skb, delaying "
1048 "allocation until first transfer\n",
1049 dev->name,
1050 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1053 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1054 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1055 ch->ccw[0].count = 0;
1056 ch->ccw[0].cda = 0;
1057 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
1058 ch->ccw[2].flags = CCW_FLAG_SLI;
1059 ch->ccw[2].count = 0;
1060 ch->ccw[2].cda = 0;
1061 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1062 ch->ccw[4].cda = 0;
1063 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1065 fsm_newstate(fi, CH_STATE_STARTWAIT);
1066 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1067 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1068 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1069 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1070 if (rc != 0) {
1071 if (rc != -EBUSY)
1072 fsm_deltimer(&ch->timer);
1073 ccw_check_return_code(ch, rc, "initial HaltIO");
1075 #ifdef DEBUG
1076 ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1077 #endif
1081 * Shutdown a channel.
1083 * @param fi An instance of a channel statemachine.
1084 * @param event The event, just happened.
1085 * @param arg Generic pointer, casted from channel * upon call.
1087 static void
1088 ch_action_haltio(fsm_instance * fi, int event, void *arg)
1090 struct channel *ch = (struct channel *) arg;
1091 unsigned long saveflags;
1092 int rc;
1093 int oldstate;
1095 DBF_TEXT(trace, 3, __FUNCTION__);
1096 fsm_deltimer(&ch->timer);
1097 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1098 saveflags = 0; /* avoids comp warning with
1099 spin_unlock_irqrestore */
1100 if (event == CH_EVENT_STOP) // only for STOP not yet locked
1101 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1102 oldstate = fsm_getstate(fi);
1103 fsm_newstate(fi, CH_STATE_TERM);
1104 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1105 if (event == CH_EVENT_STOP)
1106 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1107 if (rc != 0) {
1108 if (rc != -EBUSY) {
1109 fsm_deltimer(&ch->timer);
1110 fsm_newstate(fi, oldstate);
1112 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1117 * A channel has successfully been halted.
1118 * Clean up its queue and notify the interface statemachine.
1120 * @param fi An instance of a channel statemachine.
1121 * @param event The event, just happened.
1122 * @param arg Generic pointer, casted from channel * upon call.
1124 static void
1125 ch_action_stopped(fsm_instance * fi, int event, void *arg)
1127 struct channel *ch = (struct channel *) arg;
1128 struct net_device *dev = ch->netdev;
1130 DBF_TEXT(trace, 3, __FUNCTION__);
1131 fsm_deltimer(&ch->timer);
1132 fsm_newstate(fi, CH_STATE_STOPPED);
1133 if (ch->trans_skb != NULL) {
1134 clear_normalized_cda(&ch->ccw[1]);
1135 dev_kfree_skb(ch->trans_skb);
1136 ch->trans_skb = NULL;
1138 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1139 skb_queue_purge(&ch->io_queue);
1140 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1141 DEV_EVENT_RXDOWN, dev);
1142 } else {
1143 ctc_purge_skb_queue(&ch->io_queue);
1144 spin_lock(&ch->collect_lock);
1145 ctc_purge_skb_queue(&ch->collect_queue);
1146 ch->collect_len = 0;
1147 spin_unlock(&ch->collect_lock);
1148 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1149 DEV_EVENT_TXDOWN, dev);
1154 * A stop command from the device statemachine arrived and we are in
1155 * not-operational mode. Set the state to stopped.
1157 * @param fi An instance of a channel statemachine.
1158 * @param event The event, just happened.
1159 * @param arg Generic pointer, casted from channel * upon call.
1161 static void
1162 ch_action_stop(fsm_instance * fi, int event, void *arg)
1164 fsm_newstate(fi, CH_STATE_STOPPED);
1168 * A machine check for no path, not-operational status or a gone device
1169 * has happened.
1170 * Clean up the queues and notify the interface statemachine.
1172 * @param fi An instance of a channel statemachine.
1173 * @param event The event, just happened.
1174 * @param arg Generic pointer, casted from channel * upon call.
1176 static void
1177 ch_action_fail(fsm_instance * fi, int event, void *arg)
1179 struct channel *ch = (struct channel *) arg;
1180 struct net_device *dev = ch->netdev;
1182 DBF_TEXT(trace, 3, __FUNCTION__);
1183 fsm_deltimer(&ch->timer);
1184 fsm_newstate(fi, CH_STATE_NOTOP);
1185 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1186 skb_queue_purge(&ch->io_queue);
1187 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1188 DEV_EVENT_RXDOWN, dev);
1189 } else {
1190 ctc_purge_skb_queue(&ch->io_queue);
1191 spin_lock(&ch->collect_lock);
1192 ctc_purge_skb_queue(&ch->collect_queue);
1193 ch->collect_len = 0;
1194 spin_unlock(&ch->collect_lock);
1195 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1196 DEV_EVENT_TXDOWN, dev);
1201 * Handle error during setup of channel.
1203 * @param fi An instance of a channel statemachine.
1204 * @param event The event, just happened.
1205 * @param arg Generic pointer, casted from channel * upon call.
1207 static void
1208 ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1210 struct channel *ch = (struct channel *) arg;
1211 struct net_device *dev = ch->netdev;
1213 DBF_TEXT(setup, 3, __FUNCTION__);
1215 * Special case: Got UC_RCRESET on setmode.
1216 * This means that the remote side isn't set up yet. In this
1217 * case simply retry after some 10 secs...
1219 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1220 ((event == CH_EVENT_UC_RCRESET) ||
1221 (event == CH_EVENT_UC_RSRESET))) {
1222 fsm_newstate(fi, CH_STATE_STARTRETRY);
1223 fsm_deltimer(&ch->timer);
1224 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1225 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1226 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1227 if (rc != 0)
1228 ccw_check_return_code(
1229 ch, rc, "HaltIO in ch_action_setuperr");
1231 return;
1234 ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1235 dev->name, ch_event_names[event],
1236 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1237 fsm_getstate_str(fi));
1238 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1239 fsm_newstate(fi, CH_STATE_RXERR);
1240 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1241 DEV_EVENT_RXDOWN, dev);
1242 } else {
1243 fsm_newstate(fi, CH_STATE_TXERR);
1244 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1245 DEV_EVENT_TXDOWN, dev);
1250 * Restart a channel after an error.
1252 * @param fi An instance of a channel statemachine.
1253 * @param event The event, just happened.
1254 * @param arg Generic pointer, casted from channel * upon call.
1256 static void
1257 ch_action_restart(fsm_instance * fi, int event, void *arg)
1259 unsigned long saveflags;
1260 int oldstate;
1261 int rc;
1263 struct channel *ch = (struct channel *) arg;
1264 struct net_device *dev = ch->netdev;
1266 DBF_TEXT(trace, 3, __FUNCTION__);
1267 fsm_deltimer(&ch->timer);
1268 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1269 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1270 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1271 oldstate = fsm_getstate(fi);
1272 fsm_newstate(fi, CH_STATE_STARTWAIT);
1273 saveflags = 0; /* avoids compiler warning with
1274 spin_unlock_irqrestore */
1275 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1276 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1277 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1278 if (event == CH_EVENT_TIMER)
1279 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1280 if (rc != 0) {
1281 if (rc != -EBUSY) {
1282 fsm_deltimer(&ch->timer);
1283 fsm_newstate(fi, oldstate);
1285 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1290 * Handle error during RX initial handshake (exchange of
1291 * 0-length block header)
1293 * @param fi An instance of a channel statemachine.
1294 * @param event The event, just happened.
1295 * @param arg Generic pointer, casted from channel * upon call.
1297 static void
1298 ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1300 struct channel *ch = (struct channel *) arg;
1301 struct net_device *dev = ch->netdev;
1303 DBF_TEXT(setup, 3, __FUNCTION__);
1304 if (event == CH_EVENT_TIMER) {
1305 fsm_deltimer(&ch->timer);
1306 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1307 if (ch->retry++ < 3)
1308 ch_action_restart(fi, event, arg);
1309 else {
1310 fsm_newstate(fi, CH_STATE_RXERR);
1311 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1312 DEV_EVENT_RXDOWN, dev);
1314 } else
1315 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1319 * Notify device statemachine if we gave up initialization
1320 * of RX channel.
1322 * @param fi An instance of a channel statemachine.
1323 * @param event The event, just happened.
1324 * @param arg Generic pointer, casted from channel * upon call.
1326 static void
1327 ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1329 struct channel *ch = (struct channel *) arg;
1330 struct net_device *dev = ch->netdev;
1332 DBF_TEXT(setup, 3, __FUNCTION__);
1333 fsm_newstate(fi, CH_STATE_RXERR);
1334 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1335 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1336 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1340 * Handle RX Unit check remote reset (remote disconnected)
1342 * @param fi An instance of a channel statemachine.
1343 * @param event The event, just happened.
1344 * @param arg Generic pointer, casted from channel * upon call.
1346 static void
1347 ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1349 struct channel *ch = (struct channel *) arg;
1350 struct channel *ch2;
1351 struct net_device *dev = ch->netdev;
1353 DBF_TEXT(trace, 3, __FUNCTION__);
1354 fsm_deltimer(&ch->timer);
1355 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1356 dev->name);
1359 * Notify device statemachine
1361 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1362 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1364 fsm_newstate(fi, CH_STATE_DTERM);
1365 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1366 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1368 ccw_device_halt(ch->cdev, (unsigned long) ch);
1369 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
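/*
 * Both channels now sit in CH_STATE_DTERM while being halted; the
 * final status of the halt arrives as CH_EVENT_FINSTAT and, per the
 * ch_fsm table below, runs ch_action_setmode so the handshake is
 * redone from scratch.
 */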
1373 * Handle error during TX channel initialization.
1375 * @param fi An instance of a channel statemachine.
1376 * @param event The event, just happened.
1377 * @param arg Generic pointer, casted from channel * upon call.
1379 static void
1380 ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1382 struct channel *ch = (struct channel *) arg;
1383 struct net_device *dev = ch->netdev;
1385 DBF_TEXT(setup, 2, __FUNCTION__);
1386 if (event == CH_EVENT_TIMER) {
1387 fsm_deltimer(&ch->timer);
1388 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1389 if (ch->retry++ < 3)
1390 ch_action_restart(fi, event, arg);
1391 else {
1392 fsm_newstate(fi, CH_STATE_TXERR);
1393 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1394 DEV_EVENT_TXDOWN, dev);
1396 } else
1397 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1401 * Handle TX timeout by retrying operation.
1403 * @param fi An instance of a channel statemachine.
1404 * @param event The event, just happened.
1405 * @param arg Generic pointer, casted from channel * upon call.
1407 static void
1408 ch_action_txretry(fsm_instance * fi, int event, void *arg)
1410 struct channel *ch = (struct channel *) arg;
1411 struct net_device *dev = ch->netdev;
1412 unsigned long saveflags;
1414 DBF_TEXT(trace, 4, __FUNCTION__);
1415 fsm_deltimer(&ch->timer);
1416 if (ch->retry++ > 3) {
1417 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1418 dev->name);
1419 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1420 DEV_EVENT_TXDOWN, dev);
1421 ch_action_restart(fi, event, arg);
1422 } else {
1423 struct sk_buff *skb;
1425 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
1426 if ((skb = skb_peek(&ch->io_queue))) {
1427 int rc = 0;
1429 clear_normalized_cda(&ch->ccw[4]);
1430 ch->ccw[4].count = skb->len;
1431 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1432 ctc_pr_debug(
1433 "%s: IDAL alloc failed, chan restart\n",
1434 dev->name);
1435 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1436 DEV_EVENT_TXDOWN, dev);
1437 ch_action_restart(fi, event, arg);
1438 return;
1440 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1441 saveflags = 0; /* avoids compiler warning with
1442 spin_unlock_irqrestore */
1443 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1444 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1445 saveflags);
1446 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1447 (unsigned long) ch, 0xff, 0);
1448 if (event == CH_EVENT_TIMER)
1449 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1450 saveflags);
1451 if (rc != 0) {
1452 fsm_deltimer(&ch->timer);
1453 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1454 ctc_purge_skb_queue(&ch->io_queue);
1462 * Handle fatal errors during an I/O command.
1464 * @param fi An instance of a channel statemachine.
1465 * @param event The event, just happened.
1466 * @param arg Generic pointer, casted from channel * upon call.
1468 static void
1469 ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1471 struct channel *ch = (struct channel *) arg;
1472 struct net_device *dev = ch->netdev;
1474 DBF_TEXT(trace, 3, __FUNCTION__);
1475 fsm_deltimer(&ch->timer);
1476 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1477 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1478 fsm_newstate(fi, CH_STATE_RXERR);
1479 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1480 DEV_EVENT_RXDOWN, dev);
1481 } else {
1482 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1483 fsm_newstate(fi, CH_STATE_TXERR);
1484 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1485 DEV_EVENT_TXDOWN, dev);
1489 static void
1490 ch_action_reinit(fsm_instance *fi, int event, void *arg)
1492 struct channel *ch = (struct channel *)arg;
1493 struct net_device *dev = ch->netdev;
1494 struct ctc_priv *privptr = dev->priv;
1496 DBF_TEXT(trace, 4, __FUNCTION__);
1497 ch_action_iofatal(fi, event, arg);
1498 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1502 * The statemachine for a channel.
1504 static const fsm_node ch_fsm[] = {
1505 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1506 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1507 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1508 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1510 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1511 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1512 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1513 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1514 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1516 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1517 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1518 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1519 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1520 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1521 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1522 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1524 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1525 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1526 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1527 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1529 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1530 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1531 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1532 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1533 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1534 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1535 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1536 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1537 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1539 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1540 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1541 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1542 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1543 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1544 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1545 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1546 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1547 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1548 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1549 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1551 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1552 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1553 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1554 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
1555 // {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1556 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1557 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1558 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1559 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1561 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1562 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1563 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1564 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1565 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1566 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1567 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1568 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1569 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1571 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1572 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1573 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1574 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1575 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1576 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1577 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1578 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1580 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1581 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1582 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1583 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1584 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1585 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1587 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1588 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1589 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1590 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1591 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1592 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1594 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1595 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1596 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1597 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1598 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1599 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1600 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1601 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1602 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1604 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1605 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1606 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1607 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1610 static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
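/*
 * How the table above is used (a sketch): init_fsm() in add_channel()
 * registers ch_state_names, ch_event_names and this transition table,
 * and fsm_event() then looks up the action for the channel's current
 * state and the posted event (see fsm.c). A normal startup of one
 * channel thus runs
 *
 *   STOPPED   --START   / ch_action_start   --> STARTWAIT
 *   STARTWAIT --FINSTAT / ch_action_setmode --> SETUPWAIT
 *   SETUPWAIT --FINSTAT / ch_action_firstio --> RXINIT or TXINIT
 *   RX/TXINIT --FINSTAT / ch_action_rxidle or ch_action_txidle --> RX/TXIDLE
 *
 * after which the device statemachine sees DEV_EVENT_RXUP / DEV_EVENT_TXUP.
 */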
1613 * Functions related to setup and device detection.
1614 *****************************************************************************/
1616 static inline int
1617 less_than(char *id1, char *id2)
1619 int dev1, dev2, i;
1621 for (i = 0; i < 5; i++) {
1622 id1++;
1623 id2++;
1625 dev1 = simple_strtoul(id1, &id1, 16);
1626 dev2 = simple_strtoul(id2, &id2, 16);
1628 return (dev1 < dev2);
1632 * Add a new channel to the list of channels.
1633 * Keeps the channel list sorted.
1635 * @param cdev The ccw_device to be added.
1636 * @param type The type class of the new channel.
1638 * @return 0 on success, !0 on error.
1640 static int
1641 add_channel(struct ccw_device *cdev, enum channel_types type)
1643 struct channel **c = &channels;
1644 struct channel *ch;
1646 DBF_TEXT(trace, 2, __FUNCTION__);
1647 if ((ch =
1648 (struct channel *) kmalloc(sizeof (struct channel),
1649 GFP_KERNEL)) == NULL) {
1650 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1651 return -1;
1653 memset(ch, 0, sizeof (struct channel));
1654 if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1),
1655 GFP_KERNEL | GFP_DMA)) == NULL) {
1656 kfree(ch);
1657 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1658 return -1;
1661 memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // assure all flags and counters are reset
1664 * "static" ccws are used in the following way:
1666 * ccw[0..2] (Channel program for generic I/O):
1667 * 0: prepare
1668 * 1: read or write (depending on direction) with fixed
1669 * buffer (idal allocated once when buffer is allocated)
1670 * 2: nop
1671 * ccw[3..5] (Channel program for direct write of packets)
1672 * 3: prepare
1673 * 4: write (idal allocated on every write).
1674 * 5: nop
1675 * ccw[6..7] (Channel program for initial channel setup):
1676 * 6: set extended mode
1677 * 7: nop
1679 * ch->ccw[0..5] are initialized in ch_action_start because
1680 * the channel's direction is yet unknown here.
1682 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1683 ch->ccw[6].flags = CCW_FLAG_SLI;
1685 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1686 ch->ccw[7].flags = CCW_FLAG_SLI;
1688 ch->cdev = cdev;
1689 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1690 ch->type = type;
1691 ch->fsm = init_fsm(ch->id, ch_state_names,
1692 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1693 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1694 if (ch->fsm == NULL) {
1695 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1696 kfree(ch->ccw);
1697 kfree(ch);
1698 return -1;
1700 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1701 if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb),
1702 GFP_KERNEL)) == NULL) {
1703 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1704 kfree_fsm(ch->fsm);
1705 kfree(ch->ccw);
1706 kfree(ch);
1707 return -1;
1709 memset(ch->irb, 0, sizeof (struct irb));
1710 while (*c && less_than((*c)->id, ch->id))
1711 c = &(*c)->next;
1712 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1713 ctc_pr_debug(
1714 "ctc: add_channel: device %s already in list, "
1715 "using old entry\n", (*c)->id);
1716 kfree(ch->irb);
1717 kfree_fsm(ch->fsm);
1718 kfree(ch->ccw);
1719 kfree(ch);
1720 return 0;
1722 fsm_settimer(ch->fsm, &ch->timer);
1723 skb_queue_head_init(&ch->io_queue);
1724 skb_queue_head_init(&ch->collect_queue);
1725 ch->next = *c;
1726 *c = ch;
1727 return 0;
1731 * Release a specific channel in the channel list.
1733 * @param ch Pointer to channel struct to be released.
1735 static void
1736 channel_free(struct channel *ch)
1738 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1739 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1743 * Remove a specific channel from the channel list.
1745 * @param ch Pointer to channel struct to be released.
1747 static void
1748 channel_remove(struct channel *ch)
1750 struct channel **c = &channels;
1752 DBF_TEXT(trace, 2, __FUNCTION__);
1753 if (ch == NULL)
1754 return;
1756 channel_free(ch);
1757 while (*c) {
1758 if (*c == ch) {
1759 *c = ch->next;
1760 fsm_deltimer(&ch->timer);
1761 kfree_fsm(ch->fsm);
1762 clear_normalized_cda(&ch->ccw[4]);
1763 if (ch->trans_skb != NULL) {
1764 clear_normalized_cda(&ch->ccw[1]);
1765 dev_kfree_skb(ch->trans_skb);
1767 kfree(ch->ccw);
1768 kfree(ch->irb);
1769 kfree(ch);
1770 return;
1772 c = &((*c)->next);
1777 * Get a specific channel from the channel list.
1779 * @param type Type of channel we are interested in.
1780 * @param id Id of channel we are interested in.
1781 * @param direction Direction we want to use this channel for.
1783 * @return Pointer to a channel or NULL if no matching channel available.
1785 static struct channel
1787 channel_get(enum channel_types type, char *id, int direction)
1789 struct channel *ch = channels;
1791 DBF_TEXT(trace, 3, __FUNCTION__);
1792 #ifdef DEBUG
1793 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
1794 __func__, id, type);
1795 #endif
1797 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
1798 #ifdef DEBUG
1799 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1800 __func__, ch, ch->id, ch->type);
1801 #endif
1802 ch = ch->next;
1804 #ifdef DEBUG
1805 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
1806 __func__, ch, ch->id, ch->type);
1807 #endif
1808 if (!ch) {
1809 ctc_pr_warn("ctc: %s(): channel with id %s "
1810 "and type %d not found in channel list\n",
1811 __func__, id, type);
1812 } else {
1813 if (ch->flags & CHANNEL_FLAGS_INUSE)
1814 ch = NULL;
1815 else {
1816 ch->flags |= CHANNEL_FLAGS_INUSE;
1817 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
1818 ch->flags |= (direction == WRITE)
1819 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
1820 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
1823 return ch;
1827 * Return the channel type by name.
1829 * @param name Name of network interface.
1831 * @return Type class of channel to be used for that interface.
1833 static enum channel_types inline
1834 extract_channel_media(char *name)
1836 enum channel_types ret = channel_type_unknown;
1838 if (name != NULL) {
1839 if (strncmp(name, "ctc", 3) == 0)
1840 ret = channel_type_parallel;
1841 if (strncmp(name, "escon", 5) == 0)
1842 ret = channel_type_escon;
1844 return ret;
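/*
 * Example: an interface configured as "ctc0" maps to
 * channel_type_parallel, "escon1" maps to channel_type_escon, and any
 * other name yields channel_type_unknown.
 */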
1847 static long
1848 __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1850 if (!IS_ERR(irb))
1851 return 0;
1853 switch (PTR_ERR(irb)) {
1854 case -EIO:
1855 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
1856 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1857 // CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1858 break;
1859 case -ETIMEDOUT:
1860 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
1861 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1862 // CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
1863 break;
1864 default:
1865 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
1866 cdev->dev.bus_id);
1867 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1868 // CTC_DBF_TEXT(trace, 2, " rc???");
1870 return PTR_ERR(irb);
1874 * Main IRQ handler.
1876 * @param cdev The ccw_device the interrupt is for.
1877 * @param intparm interruption parameter.
1878 * @param irb interruption response block.
1880 static void
1881 ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1883 struct channel *ch;
1884 struct net_device *dev;
1885 struct ctc_priv *priv;
1887 DBF_TEXT(trace, 5, __FUNCTION__);
1888 if (__ctc_check_irb_error(cdev, irb))
1889 return;
1891 /* Check for unsolicited interrupts. */
1892 if (!cdev->dev.driver_data) {
1893 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
1894 cdev->dev.bus_id, irb->scsw.cstat,
1895 irb->scsw.dstat);
1896 return;
1899 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
1900 ->dev.driver_data;
1902 /* Try to extract channel from driver data. */
1903 if (priv->channel[READ]->cdev == cdev)
1904 ch = priv->channel[READ];
1905 else if (priv->channel[WRITE]->cdev == cdev)
1906 ch = priv->channel[WRITE];
1907 else {
1908 ctc_pr_err("ctc: Can't determine channel for interrupt, "
1909 "device %s\n", cdev->dev.bus_id);
1910 return;
1913 dev = (struct net_device *) (ch->netdev);
1914 if (dev == NULL) {
1915 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
1916 cdev->dev.bus_id, ch);
1917 return;
1920 #ifdef DEBUG
1921 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
1922 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
1923 #endif
1925 /* Copy interruption response block. */
1926 memcpy(ch->irb, irb, sizeof(struct irb));
1928 /* Check for good subchannel return code, otherwise error message */
1929 if (ch->irb->scsw.cstat) {
1930 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
1931 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
1932 dev->name, ch->id, ch->irb->scsw.cstat,
1933 ch->irb->scsw.dstat);
1934 return;
1937 /* Check the reason-code of a unit check */
1938 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
1939 ccw_unit_check(ch, ch->irb->ecw[0]);
1940 return;
1942 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
1943 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
1944 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
1945 else
1946 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
1947 return;
1949 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
1950 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
1951 return;
1953 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
1954 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
1955 (ch->irb->scsw.stctl ==
1956 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1957 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
1958 else
1959 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
1964 * Actions for the interface statemachine.
1965 *****************************************************************************/
1968 * Startup channels by sending CH_EVENT_START to each channel.
1970 * @param fi An instance of an interface statemachine.
1971 * @param event The event that just happened.
1972 * @param arg Generic pointer, cast from struct net_device * upon call.
1974 static void
1975 dev_action_start(fsm_instance * fi, int event, void *arg)
1977 struct net_device *dev = (struct net_device *) arg;
1978 struct ctc_priv *privptr = dev->priv;
1979 int direction;
1981 DBF_TEXT(setup, 3, __FUNCTION__);
1982 fsm_deltimer(&privptr->restart_timer);
1983 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
1984 for (direction = READ; direction <= WRITE; direction++) {
1985 struct channel *ch = privptr->channel[direction];
1986 fsm_event(ch->fsm, CH_EVENT_START, ch);
1991 * Shutdown channels by sending CH_EVENT_STOP to each channel.
1993 * @param fi An instance of an interface statemachine.
1994 * @param event The event that just happened.
1995 * @param arg Generic pointer, cast from struct net_device * upon call.
1997 static void
1998 dev_action_stop(fsm_instance * fi, int event, void *arg)
2000 struct net_device *dev = (struct net_device *) arg;
2001 struct ctc_priv *privptr = dev->priv;
2002 int direction;
2004 DBF_TEXT(trace, 3, __FUNCTION__);
2005 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2006 for (direction = READ; direction <= WRITE; direction++) {
2007 struct channel *ch = privptr->channel[direction];
2008 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
2011 static void
2012 dev_action_restart(fsm_instance *fi, int event, void *arg)
2014 struct net_device *dev = (struct net_device *)arg;
2015 struct ctc_priv *privptr = dev->priv;
2017 DBF_TEXT(trace, 3, __FUNCTION__);
2018 ctc_pr_debug("%s: Restarting\n", dev->name);
2019 dev_action_stop(fi, event, arg);
2020 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
2021 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2022 DEV_EVENT_START, dev);
2026 * Called from channel statemachine
2027 * when a channel is up and running.
2029 * @param fi An instance of an interface statemachine.
2030 * @param event The event that just happened.
2031 * @param arg Generic pointer, cast from struct net_device * upon call.
2033 static void
2034 dev_action_chup(fsm_instance * fi, int event, void *arg)
2036 struct net_device *dev = (struct net_device *) arg;
2037 struct ctc_priv *privptr = dev->priv;
2039 DBF_TEXT(trace, 3, __FUNCTION__);
2040 switch (fsm_getstate(fi)) {
2041 case DEV_STATE_STARTWAIT_RXTX:
2042 if (event == DEV_EVENT_RXUP)
2043 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2044 else
2045 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2046 break;
2047 case DEV_STATE_STARTWAIT_RX:
2048 if (event == DEV_EVENT_RXUP) {
2049 fsm_newstate(fi, DEV_STATE_RUNNING);
2050 ctc_pr_info("%s: connected with remote side\n",
2051 dev->name);
2052 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2053 ctc_tty_setcarrier(dev, 1);
2054 ctc_clear_busy(dev);
2056 break;
2057 case DEV_STATE_STARTWAIT_TX:
2058 if (event == DEV_EVENT_TXUP) {
2059 fsm_newstate(fi, DEV_STATE_RUNNING);
2060 ctc_pr_info("%s: connected with remote side\n",
2061 dev->name);
2062 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2063 ctc_tty_setcarrier(dev, 1);
2064 ctc_clear_busy(dev);
2066 break;
2067 case DEV_STATE_STOPWAIT_TX:
2068 if (event == DEV_EVENT_RXUP)
2069 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2070 break;
2071 case DEV_STATE_STOPWAIT_RX:
2072 if (event == DEV_EVENT_TXUP)
2073 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2074 break;
2079 * Called from channel statemachine
2080 * when a channel has been shutdown.
2082 * @param fi An instance of an interface statemachine.
2083 * @param event The event that just happened.
2084 * @param arg Generic pointer, cast from struct net_device * upon call.
2086 static void
2087 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2089 struct net_device *dev = (struct net_device *) arg;
2090 struct ctc_priv *privptr = dev->priv;
2092 DBF_TEXT(trace, 3, __FUNCTION__);
2093 switch (fsm_getstate(fi)) {
2094 case DEV_STATE_RUNNING:
2095 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2096 ctc_tty_setcarrier(dev, 0);
2097 if (event == DEV_EVENT_TXDOWN)
2098 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2099 else
2100 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2101 break;
2102 case DEV_STATE_STARTWAIT_RX:
2103 if (event == DEV_EVENT_TXDOWN)
2104 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2105 break;
2106 case DEV_STATE_STARTWAIT_TX:
2107 if (event == DEV_EVENT_RXDOWN)
2108 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2109 break;
2110 case DEV_STATE_STOPWAIT_RXTX:
2111 if (event == DEV_EVENT_TXDOWN)
2112 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2113 else
2114 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2115 break;
2116 case DEV_STATE_STOPWAIT_RX:
2117 if (event == DEV_EVENT_RXDOWN)
2118 fsm_newstate(fi, DEV_STATE_STOPPED);
2119 break;
2120 case DEV_STATE_STOPWAIT_TX:
2121 if (event == DEV_EVENT_TXDOWN)
2122 fsm_newstate(fi, DEV_STATE_STOPPED);
2123 break;
2127 static const fsm_node dev_fsm[] = {
2128 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2130 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2131 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2132 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2133 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2135 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2136 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2137 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2138 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2139 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2141 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2142 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2143 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2144 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2145 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2147 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2148 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2149 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2150 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2151 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2152 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2154 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2155 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2156 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2157 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2158 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2160 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2161 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2162 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2163 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2164 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2166 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2167 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2168 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2169 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2170 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2171 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2174 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
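/*
 * dev_fsm is a sparse list of (state, event) -> action transitions; it is
 * handed to init_fsm() in ctc_init_netdevice() below, which builds the
 * dispatch table from it. DEV_FSM_LEN is simply the number of entries
 * (the same value ARRAY_SIZE(dev_fsm) would yield).
 */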
2177 * Transmit a packet.
2178 * This is a helper function for ctc_tx().
2180 * @param ch Channel to be used for sending.
2181 * @param skb Pointer to struct sk_buff of packet to send.
2182 * The linklevel header has already been set up
2183 * by ctc_tx().
2185 * @return 0 on success, -ERRNO (e.g. -EBUSY, -ENOMEM) on failure.
2187 static int
2188 transmit_skb(struct channel *ch, struct sk_buff *skb)
2190 unsigned long saveflags;
2191 struct ll_header header;
2192 int rc = 0;
2194 DBF_TEXT(trace, 5, __FUNCTION__);
2195 /* we need to acquire the lock for testing the state
2196 * otherwise we can have an IRQ changing the state to
2197 * TXIDLE after the test but before acquiring the lock.
2199 spin_lock_irqsave(&ch->collect_lock, saveflags);
2200 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2201 int l = skb->len + LL_HEADER_LENGTH;
2203 if (ch->collect_len + l > ch->max_bufsize - 2) {
2204 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2205 return -EBUSY;
2206 } else {
2207 atomic_inc(&skb->users);
2208 header.length = l;
2209 header.type = skb->protocol;
2210 header.unused = 0;
2211 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2212 LL_HEADER_LENGTH);
2213 skb_queue_tail(&ch->collect_queue, skb);
2214 ch->collect_len += l;
2216 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2217 } else {
2218 __u16 block_len;
2219 int ccw_idx;
2220 struct sk_buff *nskb;
2221 unsigned long hi;
2222 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2224 * Protect skb against being freed by upper
2225 * layers.
2227 atomic_inc(&skb->users);
2228 ch->prof.txlen += skb->len;
2229 header.length = skb->len + LL_HEADER_LENGTH;
2230 header.type = skb->protocol;
2231 header.unused = 0;
2232 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2233 LL_HEADER_LENGTH);
2234 block_len = skb->len + 2;
2235 *((__u16 *) skb_push(skb, 2)) = block_len;
2238 * IDAL support in CTC is broken, so we have to
2239 * take care of skbs above 2 GB ourselves.
2241 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
2242 if (hi) {
2243 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2244 if (!nskb) {
2245 atomic_dec(&skb->users);
2246 skb_pull(skb, LL_HEADER_LENGTH + 2);
2247 ctc_clear_busy(ch->netdev);
2248 return -ENOMEM;
2249 } else {
2250 memcpy(skb_put(nskb, skb->len),
2251 skb->data, skb->len);
2252 atomic_inc(&nskb->users);
2253 atomic_dec(&skb->users);
2254 dev_kfree_skb_irq(skb);
2255 skb = nskb;
2259 ch->ccw[4].count = block_len;
2260 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2262 * idal allocation failed, try via copying to
2263 * trans_skb. trans_skb usually has a pre-allocated
2264 * idal.
2266 if (ctc_checkalloc_buffer(ch, 1)) {
2268 * Remove our header. It gets added
2269 * again on retransmit.
2271 atomic_dec(&skb->users);
2272 skb_pull(skb, LL_HEADER_LENGTH + 2);
2273 ctc_clear_busy(ch->netdev);
2274 return -EBUSY;
2277 ch->trans_skb->tail = ch->trans_skb->data;
2278 ch->trans_skb->len = 0;
2279 ch->ccw[1].count = skb->len;
2280 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2281 skb->len);
2282 atomic_dec(&skb->users);
2283 dev_kfree_skb_irq(skb);
2284 ccw_idx = 0;
2285 } else {
2286 skb_queue_tail(&ch->io_queue, skb);
2287 ccw_idx = 3;
2289 ch->retry = 0;
2290 fsm_newstate(ch->fsm, CH_STATE_TX);
2291 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2292 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
2293 ch->prof.send_stamp = xtime;
2294 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2295 (unsigned long) ch, 0xff, 0);
2296 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2297 if (ccw_idx == 3)
2298 ch->prof.doios_single++;
2299 if (rc != 0) {
2300 fsm_deltimer(&ch->timer);
2301 ccw_check_return_code(ch, rc, "single skb TX");
2302 if (ccw_idx == 3)
2303 skb_dequeue_tail(&ch->io_queue);
2305 * Remove our header. It gets added
2306 * again on retransmit.
2308 skb_pull(skb, LL_HEADER_LENGTH + 2);
2309 } else {
2310 if (ccw_idx == 0) {
2311 struct net_device *dev = ch->netdev;
2312 struct ctc_priv *privptr = dev->priv;
2313 privptr->stats.tx_packets++;
2314 privptr->stats.tx_bytes +=
2315 skb->len - LL_HEADER_LENGTH;
2320 ctc_clear_busy(ch->netdev);
2321 return rc;
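/*
 * Note on the two transmit paths above: if the write channel is not in
 * CH_STATE_TXIDLE, the skb gets its LL header and is queued on collect_queue
 * for a later multi-write; otherwise it is framed as
 *
 *	2-byte block length | LL header (length, type, unused) | payload
 *
 * and a CCW program is started right away. ccw_idx 0 is used when the payload
 * has to be copied into the pre-allocated trans_skb because IDAL setup
 * (set_normalized_cda) failed; ccw_idx 3 transfers directly from the skb,
 * which is first copied into a GFP_DMA buffer if it lies above 2 GB.
 */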
2325 * Interface API for upper network layers
2326 *****************************************************************************/
2329 * Open an interface.
2330 * Called from generic network layer when ifconfig up is run.
2332 * @param dev Pointer to interface struct.
2334 * @return 0 on success, -ERRNO on failure. (Never fails.)
2336 static int
2337 ctc_open(struct net_device * dev)
2339 DBF_TEXT(trace, 5, __FUNCTION__);
2340 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2341 return 0;
2345 * Close an interface.
2346 * Called from generic network layer when ifconfig down is run.
2348 * @param dev Pointer to interface struct.
2350 * @return 0 on success, -ERRNO on failure. (Never fails.)
2352 static int
2353 ctc_close(struct net_device * dev)
2355 DBF_TEXT(trace, 5, __FUNCTION__);
2356 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2357 return 0;
2361 * Start transmission of a packet.
2362 * Called from generic network device layer.
2364 * @param skb Pointer to buffer containing the packet.
2365 * @param dev Pointer to interface struct.
2367 * @return 0 if packet consumed, !0 if packet rejected.
2368 * Note: If we return !0, then the packet is freed by
2369 * the generic network layer.
2371 static int
2372 ctc_tx(struct sk_buff *skb, struct net_device * dev)
2374 int rc = 0;
2375 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2377 DBF_TEXT(trace, 5, __FUNCTION__);
2379 * Some sanity checks ...
2381 if (skb == NULL) {
2382 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2383 privptr->stats.tx_dropped++;
2384 return 0;
2386 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2387 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2388 dev->name, LL_HEADER_LENGTH + 2);
2389 dev_kfree_skb(skb);
2390 privptr->stats.tx_dropped++;
2391 return 0;
2395 * If channels are not running, try to restart them
2396 * and throw away packet.
2398 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2399 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2400 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2401 return -EBUSY;
2402 dev_kfree_skb(skb);
2403 privptr->stats.tx_dropped++;
2404 privptr->stats.tx_errors++;
2405 privptr->stats.tx_carrier_errors++;
2406 return 0;
2409 if (ctc_test_and_set_busy(dev))
2410 return -EBUSY;
2412 dev->trans_start = jiffies;
2413 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2414 rc = 1;
2415 return rc;
2419 * Sets MTU of an interface.
2421 * @param dev Pointer to interface struct.
2422 * @param new_mtu The new MTU to use for this interface.
2424 * @return 0 on success, -EINVAL if MTU is out of valid range.
2425 * (valid range is 576 .. 65527). If VM is on the
2426 * remote side, the maximum MTU is 32760; however, this is
2427 * <em>not</em> checked here.
2429 static int
2430 ctc_change_mtu(struct net_device * dev, int new_mtu)
2432 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2434 DBF_TEXT(trace, 3, __FUNCTION__);
2435 if ((new_mtu < 576) || (new_mtu > 65527) ||
2436 (new_mtu > (privptr->channel[READ]->max_bufsize -
2437 LL_HEADER_LENGTH - 2)))
2438 return -EINVAL;
2439 dev->mtu = new_mtu;
2440 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2441 return 0;
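/*
 * Worked example (assuming the ctcmain.h values CTC_BUFSIZE_DEFAULT = 32768
 * and LL_HEADER_LENGTH = 6): the largest MTU that fits the default buffer is
 * 32768 - 6 - 2 = 32760, which is also the default assigned in
 * ctc_init_netdevice() below and matches the VM limit mentioned above.
 */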
2445 * Returns interface statistics of a device.
2447 * @param dev Pointer to interface struct.
2449 * @return Pointer to stats struct of this interface.
2451 static struct net_device_stats *
2452 ctc_stats(struct net_device * dev)
2454 return &((struct ctc_priv *) dev->priv)->stats;
2458 * sysfs attributes
2461 static ssize_t
2462 buffer_show(struct device *dev, struct device_attribute *attr, char *buf)
2464 struct ctc_priv *priv;
2466 priv = dev->driver_data;
2467 if (!priv)
2468 return -ENODEV;
2469 return sprintf(buf, "%d\n",
2470 priv->buffer_size);
2473 static ssize_t
2474 buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2476 struct ctc_priv *priv;
2477 struct net_device *ndev;
2478 int bs1;
2479 char buffer[16];
2481 DBF_TEXT(trace, 3, __FUNCTION__);
2482 DBF_TEXT(trace, 3, buf);
2483 priv = dev->driver_data;
2484 if (!priv) {
2485 DBF_TEXT(trace, 3, "bfnopriv");
2486 return -ENODEV;
2489 sscanf(buf, "%d", &bs1);
2490 if (bs1 > CTC_BUFSIZE_LIMIT)
2491 goto einval;
2492 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2493 goto einval;
2494 priv->buffer_size = bs1; // just to overwrite the default
2496 ndev = priv->channel[READ]->netdev;
2497 if (!ndev) {
2498 DBF_TEXT(trace, 3, "bfnondev");
2499 return -ENODEV;
2502 if ((ndev->flags & IFF_RUNNING) &&
2503 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2504 goto einval;
2506 priv->channel[READ]->max_bufsize = bs1;
2507 priv->channel[WRITE]->max_bufsize = bs1;
2508 if (!(ndev->flags & IFF_RUNNING))
2509 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2510 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2511 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2513 sprintf(buffer, "%d",priv->buffer_size);
2514 DBF_TEXT(trace, 3, buffer);
2515 return count;
2517 einval:
2518 DBF_TEXT(trace, 3, "buff_err");
2519 return -EINVAL;
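/*
 * Usage sketch for the buffer attribute (the bus-id in the path is
 * illustrative):
 *
 *	echo 32768 > /sys/bus/ccwgroup/devices/0.0.f000/buffer
 *	cat /sys/bus/ccwgroup/devices/0.0.f000/buffer
 *
 * Values above CTC_BUFSIZE_LIMIT or below 576 + LL_HEADER_LENGTH + 2 are
 * rejected with -EINVAL, as is shrinking the buffer below MTU + header
 * while the interface is running.
 */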
2522 static ssize_t
2523 loglevel_show(struct device *dev, struct device_attribute *attr, char *buf)
2525 return sprintf(buf, "%d\n", loglevel);
2528 static ssize_t
2529 loglevel_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2531 int ll1;
2533 DBF_TEXT(trace, 5, __FUNCTION__);
2534 sscanf(buf, "%i", &ll1);
2536 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
2537 return -EINVAL;
2538 loglevel = ll1;
2539 return count;
2542 static void
2543 ctc_print_statistics(struct ctc_priv *priv)
2545 char *sbuf;
2546 char *p;
2548 DBF_TEXT(trace, 4, __FUNCTION__);
2549 if (!priv)
2550 return;
2551 sbuf = kmalloc(2048, GFP_KERNEL);
2552 if (sbuf == NULL)
2553 return;
2554 p = sbuf;
2556 p += sprintf(p, " Device FSM state: %s\n",
2557 fsm_getstate_str(priv->fsm));
2558 p += sprintf(p, " RX channel FSM state: %s\n",
2559 fsm_getstate_str(priv->channel[READ]->fsm));
2560 p += sprintf(p, " TX channel FSM state: %s\n",
2561 fsm_getstate_str(priv->channel[WRITE]->fsm));
2562 p += sprintf(p, " Max. TX buffer used: %ld\n",
2563 priv->channel[WRITE]->prof.maxmulti);
2564 p += sprintf(p, " Max. chained SKBs: %ld\n",
2565 priv->channel[WRITE]->prof.maxcqueue);
2566 p += sprintf(p, " TX single write ops: %ld\n",
2567 priv->channel[WRITE]->prof.doios_single);
2568 p += sprintf(p, " TX multi write ops: %ld\n",
2569 priv->channel[WRITE]->prof.doios_multi);
2570 p += sprintf(p, " Netto bytes written: %ld\n",
2571 priv->channel[WRITE]->prof.txlen);
2572 p += sprintf(p, " Max. TX IO-time: %ld\n",
2573 priv->channel[WRITE]->prof.tx_time);
2575 ctc_pr_debug("Statistics for %s:\n%s",
2576 priv->channel[WRITE]->netdev->name, sbuf);
2577 kfree(sbuf);
2578 return;
2581 static ssize_t
2582 stats_show(struct device *dev, struct device_attribute *attr, char *buf)
2584 struct ctc_priv *priv = dev->driver_data;
2585 if (!priv)
2586 return -ENODEV;
2587 ctc_print_statistics(priv);
2588 return sprintf(buf, "0\n");
2591 static ssize_t
2592 stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2594 struct ctc_priv *priv = dev->driver_data;
2595 if (!priv)
2596 return -ENODEV;
2597 /* Reset statistics */
2598 memset(&priv->channel[WRITE]->prof, 0,
2599 sizeof(priv->channel[WRITE]->prof));
2600 return count;
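/*
 * Usage sketch for the stats attribute (path illustrative): reading it logs
 * the TX profiling counters via ctc_print_statistics(), writing any value
 * clears them again:
 *
 *	cat /sys/bus/ccwgroup/devices/0.0.f000/stats
 *	echo 0 > /sys/bus/ccwgroup/devices/0.0.f000/stats
 */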
2603 static void
2604 ctc_netdev_unregister(struct net_device * dev)
2606 struct ctc_priv *privptr;
2608 if (!dev)
2609 return;
2610 privptr = (struct ctc_priv *) dev->priv;
2611 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2612 unregister_netdev(dev);
2613 else
2614 ctc_tty_unregister_netdev(dev);
2617 static int
2618 ctc_netdev_register(struct net_device * dev)
2620 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2621 if (privptr->protocol != CTC_PROTO_LINUX_TTY)
2622 return register_netdev(dev);
2623 else
2624 return ctc_tty_register_netdev(dev);
2627 static void
2628 ctc_free_netdevice(struct net_device * dev, int free_dev)
2630 struct ctc_priv *privptr;
2631 if (!dev)
2632 return;
2633 privptr = dev->priv;
2634 if (privptr) {
2635 if (privptr->fsm)
2636 kfree_fsm(privptr->fsm);
2637 kfree(privptr);
2639 #ifdef MODULE
2640 if (free_dev)
2641 free_netdev(dev);
2642 #endif
2645 static ssize_t
2646 ctc_proto_show(struct device *dev, struct device_attribute *attr, char *buf)
2648 struct ctc_priv *priv;
2650 priv = dev->driver_data;
2651 if (!priv)
2652 return -ENODEV;
2654 return sprintf(buf, "%d\n", priv->protocol);
2657 static ssize_t
2658 ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2660 struct ctc_priv *priv;
2661 int value;
2663 DBF_TEXT(trace, 3, __FUNCTION__);
2664 pr_debug("%s() called\n", __FUNCTION__);
2666 priv = dev->driver_data;
2667 if (!priv)
2668 return -ENODEV;
2669 sscanf(buf, "%d", &value);
2670 if ((value < 0) || (value > CTC_PROTO_MAX))
2671 return -EINVAL;
2672 priv->protocol = value;
2674 return count;
2677 static ssize_t
2678 ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2680 struct ccwgroup_device *cgdev;
2682 cgdev = to_ccwgroupdev(dev);
2683 if (!cgdev)
2684 return -ENODEV;
2686 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
2689 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2690 static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2691 static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2693 static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2694 static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
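/*
 * Each DEVICE_ATTR(name, mode, show, store) above expands to a
 * struct device_attribute named dev_attr_<name>. protocol, type and buffer
 * are exported together via ctc_attr_group below (created per device in
 * ctc_add_files()), while loglevel and stats are added individually in
 * ctc_add_attributes() once the interface is set up.
 */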
2696 static struct attribute *ctc_attr[] = {
2697 &dev_attr_protocol.attr,
2698 &dev_attr_type.attr,
2699 &dev_attr_buffer.attr,
2700 NULL,
2703 static struct attribute_group ctc_attr_group = {
2704 .attrs = ctc_attr,
2707 static int
2708 ctc_add_attributes(struct device *dev)
2710 device_create_file(dev, &dev_attr_loglevel);
2711 device_create_file(dev, &dev_attr_stats);
2712 return 0;
2715 static void
2716 ctc_remove_attributes(struct device *dev)
2718 device_remove_file(dev, &dev_attr_stats);
2719 device_remove_file(dev, &dev_attr_loglevel);
2722 static int
2723 ctc_add_files(struct device *dev)
2725 pr_debug("%s() called\n", __FUNCTION__);
2727 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
2730 static void
2731 ctc_remove_files(struct device *dev)
2733 pr_debug("%s() called\n", __FUNCTION__);
2735 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
2739 * Add ctc specific attributes.
2740 * Add ctc private data.
2742 * @param cgdev pointer to ccwgroup_device just added
2744 * @returns 0 on success, !0 on failure.
2746 static int
2747 ctc_probe_device(struct ccwgroup_device *cgdev)
2749 struct ctc_priv *priv;
2750 int rc;
2751 char buffer[16];
2753 pr_debug("%s() called\n", __FUNCTION__);
2754 DBF_TEXT(setup, 3, __FUNCTION__);
2756 if (!get_device(&cgdev->dev))
2757 return -ENODEV;
2759 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL);
2760 if (!priv) {
2761 ctc_pr_err("%s: Out of memory\n", __func__);
2762 put_device(&cgdev->dev);
2763 return -ENOMEM;
2766 memset(priv, 0, sizeof (struct ctc_priv));
2767 rc = ctc_add_files(&cgdev->dev);
2768 if (rc) {
2769 kfree(priv);
2770 put_device(&cgdev->dev);
2771 return rc;
2773 priv->buffer_size = CTC_BUFSIZE_DEFAULT;
2774 cgdev->cdev[0]->handler = ctc_irq_handler;
2775 cgdev->cdev[1]->handler = ctc_irq_handler;
2776 cgdev->dev.driver_data = priv;
2778 sprintf(buffer, "%p", priv);
2779 DBF_TEXT(data, 3, buffer);
2781 sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
2782 DBF_TEXT(data, 3, buffer);
2784 sprintf(buffer, "%p", &channels);
2785 DBF_TEXT(data, 3, buffer);
2787 sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
2788 DBF_TEXT(data, 3, buffer);
2790 return 0;
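/*
 * The kmalloc()/memset() pair above is equivalent to
 * kzalloc(sizeof(struct ctc_priv), GFP_KERNEL) and is simply kept in the
 * older style here. After probe, both CCW subchannels share ctc_irq_handler
 * and the private data hangs off the ccwgroup device's driver_data.
 */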
2794 * Initialize everything of the net device except the name and the
2795 * channel structs.
2797 static struct net_device *
2798 ctc_init_netdevice(struct net_device * dev, int alloc_device,
2799 struct ctc_priv *privptr)
2801 if (!privptr)
2802 return NULL;
2804 DBF_TEXT(setup, 3, __FUNCTION__);
2806 if (alloc_device) {
2807 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
2808 if (!dev)
2809 return NULL;
2810 memset(dev, 0, sizeof (struct net_device));
2813 dev->priv = privptr;
2814 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2815 dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
2816 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2817 if (privptr->fsm == NULL) {
2818 if (alloc_device)
2819 kfree(dev);
2820 return NULL;
2822 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2823 fsm_settimer(privptr->fsm, &privptr->restart_timer);
2824 if (dev->mtu == 0)
2825 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2826 dev->hard_start_xmit = ctc_tx;
2827 dev->open = ctc_open;
2828 dev->stop = ctc_close;
2829 dev->get_stats = ctc_stats;
2830 dev->change_mtu = ctc_change_mtu;
2831 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2832 dev->addr_len = 0;
2833 dev->type = ARPHRD_SLIP;
2834 dev->tx_queue_len = 100;
2835 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2836 SET_MODULE_OWNER(dev);
2837 return dev;
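/*
 * ctc_init_netdevice() uses the old-style setup: the struct net_device is
 * kmalloc'ed and zeroed by hand and the open/stop/xmit/stats/change_mtu
 * callbacks are assigned directly, rather than going through alloc_netdev()
 * with a setup function as newer drivers do.
 */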
2843 * Setup an interface.
2845 * @param cgdev Device to be setup.
2847 * @returns 0 on success, !0 on failure.
2849 static int
2850 ctc_new_device(struct ccwgroup_device *cgdev)
2852 char read_id[CTC_ID_SIZE];
2853 char write_id[CTC_ID_SIZE];
2854 int direction;
2855 enum channel_types type;
2856 struct ctc_priv *privptr;
2857 struct net_device *dev;
2858 int ret;
2859 char buffer[16];
2861 pr_debug("%s() called\n", __FUNCTION__);
2862 DBF_TEXT(setup, 3, __FUNCTION__);
2864 privptr = cgdev->dev.driver_data;
2865 if (!privptr)
2866 return -ENODEV;
2868 sprintf(buffer, "%d", privptr->buffer_size);
2869 DBF_TEXT(setup, 3, buffer);
2871 type = get_channel_type(&cgdev->cdev[0]->id);
2873 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
2874 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
2876 if (add_channel(cgdev->cdev[0], type))
2877 return -ENOMEM;
2878 if (add_channel(cgdev->cdev[1], type))
2879 return -ENOMEM;
2881 ret = ccw_device_set_online(cgdev->cdev[0]);
2882 if (ret != 0) {
2883 printk(KERN_WARNING
2884 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
2887 ret = ccw_device_set_online(cgdev->cdev[1]);
2888 if (ret != 0) {
2889 printk(KERN_WARNING
2890 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
2893 dev = ctc_init_netdevice(NULL, 1, privptr);
2895 if (!dev) {
2896 ctc_pr_warn("ctc_init_netdevice failed\n");
2897 goto out;
2900 if (privptr->protocol == CTC_PROTO_LINUX_TTY)
2901 strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
2902 else
2903 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
2905 for (direction = READ; direction <= WRITE; direction++) {
2906 privptr->channel[direction] =
2907 channel_get(type, direction == READ ? read_id : write_id,
2908 direction);
2909 if (privptr->channel[direction] == NULL) {
2910 if (direction == WRITE)
2911 channel_free(privptr->channel[READ]);
2913 ctc_free_netdevice(dev, 1);
2914 goto out;
2916 privptr->channel[direction]->netdev = dev;
2917 privptr->channel[direction]->protocol = privptr->protocol;
2918 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
2920 /* sysfs magic */
2921 SET_NETDEV_DEV(dev, &cgdev->dev);
2923 if (ctc_netdev_register(dev) != 0) {
2924 ctc_free_netdevice(dev, 1);
2925 goto out;
2928 ctc_add_attributes(&cgdev->dev);
2930 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2932 print_banner();
2934 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
2935 dev->name, privptr->channel[READ]->id,
2936 privptr->channel[WRITE]->id, privptr->protocol);
2938 return 0;
2939 out:
2940 ccw_device_set_offline(cgdev->cdev[1]);
2941 ccw_device_set_offline(cgdev->cdev[0]);
2943 return -ENODEV;
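/*
 * Order of operations in ctc_new_device(): add both channels, set both CCW
 * devices online, allocate and name the net_device, attach the read and
 * write channels to it, register it with the network (or CTC tty) layer and
 * finally add the loglevel/stats attributes. Any failure after the devices
 * were set online rolls them back offline via the out: label.
 */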
2947 * Shutdown an interface.
2949 * @param cgdev Device to be shut down.
2951 * @returns 0 on success, !0 on failure.
2953 static int
2954 ctc_shutdown_device(struct ccwgroup_device *cgdev)
2956 struct ctc_priv *priv;
2957 struct net_device *ndev;
2959 DBF_TEXT(setup, 3, __FUNCTION__);
2960 pr_debug("%s() called\n", __FUNCTION__);
2963 priv = cgdev->dev.driver_data;
2964 ndev = NULL;
2965 if (!priv)
2966 return -ENODEV;
2968 if (priv->channel[READ]) {
2969 ndev = priv->channel[READ]->netdev;
2971 /* Close the device */
2972 ctc_close(ndev);
2973 ndev->flags &=~IFF_RUNNING;
2975 ctc_remove_attributes(&cgdev->dev);
2977 channel_free(priv->channel[READ]);
2979 if (priv->channel[WRITE])
2980 channel_free(priv->channel[WRITE]);
2982 if (ndev) {
2983 ctc_netdev_unregister(ndev);
2984 ndev->priv = NULL;
2985 ctc_free_netdevice(ndev, 1);
2988 if (priv->fsm)
2989 kfree_fsm(priv->fsm);
2991 ccw_device_set_offline(cgdev->cdev[1]);
2992 ccw_device_set_offline(cgdev->cdev[0]);
2994 if (priv->channel[READ])
2995 channel_remove(priv->channel[READ]);
2996 if (priv->channel[WRITE])
2997 channel_remove(priv->channel[WRITE]);
2998 priv->channel[READ] = priv->channel[WRITE] = NULL;
3000 return 0;
3004 static void
3005 ctc_remove_device(struct ccwgroup_device *cgdev)
3007 struct ctc_priv *priv;
3009 pr_debug("%s() called\n", __FUNCTION__);
3010 DBF_TEXT(setup, 3, __FUNCTION__);
3012 priv = cgdev->dev.driver_data;
3013 if (!priv)
3014 return;
3015 if (cgdev->state == CCWGROUP_ONLINE)
3016 ctc_shutdown_device(cgdev);
3017 ctc_remove_files(&cgdev->dev);
3018 cgdev->dev.driver_data = NULL;
3019 kfree(priv);
3020 put_device(&cgdev->dev);
3023 static struct ccwgroup_driver ctc_group_driver = {
3024 .owner = THIS_MODULE,
3025 .name = "ctc",
3026 .max_slaves = 2,
3027 .driver_id = 0xC3E3C3,
3028 .probe = ctc_probe_device,
3029 .remove = ctc_remove_device,
3030 .set_online = ctc_new_device,
3031 .set_offline = ctc_shutdown_device,
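/*
 * ctc_group_driver ties the driver into the cu3088/ccwgroup layer: two CCW
 * devices (max_slaves = 2, one read and one write subchannel) form one group
 * device, probe/remove manage the private data, and set_online/set_offline
 * bring the interface up and down. The driver_id 0xC3E3C3 is "CTC" in EBCDIC.
 */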
3035 * Module related routines
3036 *****************************************************************************/
3039 * Prepare to be unloaded. Free IRQs and release all resources.
3040 * This is called just before this module is unloaded. It is
3041 * <em>not</em> called if the usage count is !0, so we don't need to check
3042 * for that.
3044 static void __exit
3045 ctc_exit(void)
3047 DBF_TEXT(setup, 3, __FUNCTION__);
3048 unregister_cu3088_discipline(&ctc_group_driver);
3049 ctc_tty_cleanup();
3050 ctc_unregister_dbf_views();
3051 ctc_pr_info("CTC driver unloaded\n");
3055 * Initialize module.
3056 * This is called just after the module is loaded.
3058 * @return 0 on success, !0 on error.
3060 static int __init
3061 ctc_init(void)
3063 int ret = 0;
3065 loglevel = CTC_LOGLEVEL_DEFAULT;
3067 DBF_TEXT(setup, 3, __FUNCTION__);
3069 print_banner();
3071 ret = ctc_register_dbf_views();
3072 if (ret){
3073 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3074 return ret;
3076 ctc_tty_init();
3077 ret = register_cu3088_discipline(&ctc_group_driver);
3078 if (ret) {
3079 ctc_tty_cleanup();
3080 ctc_unregister_dbf_views();
3082 return ret;
3085 module_init(ctc_init);
3086 module_exit(ctc_exit);
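/*
 * Typical setup from user space once the module is loaded (device numbers
 * and addresses are illustrative):
 *
 *	echo 0.0.f000,0.0.f001 > /sys/bus/ccwgroup/drivers/ctc/group
 *	echo 1 > /sys/bus/ccwgroup/devices/0.0.f000/online
 *	ifconfig ctc0 192.168.0.1 pointopoint 192.168.0.2 up
 */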
3088 /* --- This is the END my friend --- */