/*
 * Copyright IBM Corp. 2001, 2009
 *
 * Original netiucv driver:
 *	Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 * Sysfs integration and all bugs therein:
 *	Cornelia Huck (cornelia.huck@de.ibm.com)
 *	Ursula Braun (ursula.braun@de.ibm.com)
 *
 * Based on the source of the original IUCV driver by:
 *	Stefan Hegewald <hegewald@de.ibm.com>
 *	Hartmut Penner <hpenner@de.ibm.com>
 *	Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *	Martin Schwidefsky (schwidefsky@de.ibm.com)
 *	Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define KMSG_COMPONENT "netiucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>
#include <linux/device.h>

#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>

#include <asm/uaccess.h>

#include <net/iucv/iucv.h>

71 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
72 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
/*
 * Debug Facility stuff
 */
#define IUCV_DBF_SETUP_NAME "iucv_setup"
#define IUCV_DBF_SETUP_LEN 32
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3

#define IUCV_DBF_DATA_NAME "iucv_data"
#define IUCV_DBF_DATA_LEN 128
#define IUCV_DBF_DATA_PAGES 2
#define IUCV_DBF_DATA_NR_AREAS 1
#define IUCV_DBF_DATA_LEVEL 2

#define IUCV_DBF_TRACE_NAME "iucv_trace"
#define IUCV_DBF_TRACE_LEN 16
#define IUCV_DBF_TRACE_PAGES 4
#define IUCV_DBF_TRACE_NR_AREAS 1
#define IUCV_DBF_TRACE_LEVEL 3

#define IUCV_DBF_TEXT(name,level,text) \
	do { \
		debug_text_event(iucv_dbf_##name,level,text); \
	} while (0)

#define IUCV_DBF_HEX(name,level,addr,len) \
	do { \
		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
	} while (0)

DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);

/* Allow to sort out low debug levels early to avoid wasted sprints */
static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
{
	return (level <= dbf_grp->level);
}

#define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
			char* iucv_dbf_txt_buf = \
					get_cpu_var(iucv_dbf_txt_buf); \
			sprintf(iucv_dbf_txt_buf, text); \
			debug_text_event(iucv_dbf_##name, level, \
					 iucv_dbf_txt_buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)

#define IUCV_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
		debug_sprintf_event(iucv_dbf_trace, level, text ); \
	} while (0)

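/*
 * Illustrative use of the debug macros above (not part of the original
 * control flow; both calls mirror invocations found later in this file):
 *
 *	IUCV_DBF_TEXT(trace, 3, __func__);
 *	IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
 *
 * The IUCV_DBF_TEXT_() variant formats into a per-CPU buffer first, so
 * iucv_dbf_passes() is checked up front to avoid the sprintf() when the
 * requested debug level is not active.
 */
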
/*
 * some more debug stuff
 */
#define IUCV_HEXDUMP16(importance,header,ptr) \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
		   *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
		   *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
		   *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
		   *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
		   *(((char*)ptr)+12),*(((char*)ptr)+13), \
		   *(((char*)ptr)+14),*(((char*)ptr)+15)); \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
		   *(((char*)ptr)+16),*(((char*)ptr)+17), \
		   *(((char*)ptr)+18),*(((char*)ptr)+19), \
		   *(((char*)ptr)+20),*(((char*)ptr)+21), \
		   *(((char*)ptr)+22),*(((char*)ptr)+23), \
		   *(((char*)ptr)+24),*(((char*)ptr)+25), \
		   *(((char*)ptr)+26),*(((char*)ptr)+27), \
		   *(((char*)ptr)+28),*(((char*)ptr)+29), \
		   *(((char*)ptr)+30),*(((char*)ptr)+31));

#define PRINTK_HEADER " iucv: "       /* for debugging */

/* dummy device to make sure netiucv_pm functions are called */
static struct device *netiucv_dev;

static int netiucv_pm_prepare(struct device *);
static void netiucv_pm_complete(struct device *);
static int netiucv_pm_freeze(struct device *);
static int netiucv_pm_restore_thaw(struct device *);

static struct dev_pm_ops netiucv_pm_ops = {
	.prepare = netiucv_pm_prepare,
	.complete = netiucv_pm_complete,
	.freeze = netiucv_pm_freeze,
	.thaw = netiucv_pm_restore_thaw,
	.restore = netiucv_pm_restore_thaw,
};

static struct device_driver netiucv_driver = {
	.owner = THIS_MODULE,
	.pm = &netiucv_pm_ops,
};

static int netiucv_callback_connreq(struct iucv_path *,
				    u8 ipvmid[8], u8 ipuser[16]);
static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);

static struct iucv_handler netiucv_handler = {
	.path_pending	  = netiucv_callback_connreq,
	.path_complete	  = netiucv_callback_connack,
	.path_severed	  = netiucv_callback_connrej,
	.path_quiesced	  = netiucv_callback_connsusp,
	.path_resumed	  = netiucv_callback_connres,
	.message_pending  = netiucv_callback_rx,
	.message_complete = netiucv_callback_txdone
};

/*
 * Per connection profiling data
 */
struct connection_profile {
	unsigned long maxmulti;
	unsigned long maxcqueue;
	unsigned long doios_single;
	unsigned long doios_multi;
	unsigned long txlen;
	unsigned long tx_time;
	struct timespec send_stamp;
	unsigned long tx_pending;
	unsigned long tx_max_pending;
};

/*
 * Representation of one iucv connection
 */
struct iucv_connection {
	struct list_head	  list;
	struct iucv_path	  *path;
	struct sk_buff		  *rx_buff;
	struct sk_buff		  *tx_buff;
	struct sk_buff_head	  collect_queue;
	struct sk_buff_head	  commit_queue;
	spinlock_t		  collect_lock;
	int			  collect_len;
	int			  max_buffsize;
	fsm_timer		  timer;
	fsm_instance		  *fsm;
	struct net_device	  *netdev;
	struct connection_profile prof;
	char			  userid[9];
};

/*
 * Linked list of all connection structs.
 */
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);

/*
 * Representation of event-data for the
 * connection state machine.
 */
struct iucv_event {
	struct iucv_connection *conn;
	void			*data;
};

/*
 * Private part of the network device structure
 */
struct netiucv_priv {
	struct net_device_stats stats;
	unsigned long		tbusy;
	fsm_instance		*fsm;
	struct iucv_connection	*conn;
	struct device		*dev;
	int			 pm_state;
};

/*
 * Link level header for a packet.
 */
struct ll_header {
	u16 next;
};

#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX	 32768
#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT      9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC     5000

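/*
 * How the sizes above relate (a sketch derived purely from the defines,
 * not additional driver logic): each packet segment is preceded by one
 * ll_header, so the largest usable MTU is
 *
 *	NETIUCV_MTU_MAX = NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN
 *
 * while NETIUCV_MTU_DEFAULT (9216) stays well below that limit.
 */
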
/*
 * Compatibility macros for busy handling
 * of network devices.
 */
static inline void netiucv_clear_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	clear_bit(0, &priv->tbusy);
	netif_wake_queue(dev);
}

static inline int netiucv_test_and_set_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	netif_stop_queue(dev);
	return test_and_set_bit(0, &priv->tbusy);
}

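/*
 * Typical pairing of the two helpers above, as done in netiucv_tx()
 * further down (shown here only as an illustration):
 *
 *	if (netiucv_test_and_set_busy(dev))
 *		return NETDEV_TX_BUSY;
 *	rc = netiucv_transmit_skb(privptr->conn, skb);
 *	netiucv_clear_busy(dev);
 */
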
static u8 iucvMagic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};

/*
 * Convert an iucv userId to its printable
 * form (strip whitespace at end).
 *
 * @param An iucv userId
 *
 * @returns The printable string (static data!!)
 */
static char *netiucv_printname(char *name)
{
	static char tmp[9];
	char *p = tmp;
	memcpy(tmp, name, 8);
	tmp[8] = '\0';
	while (*p && (!isspace(*p)))
		p++;
	*p = '\0';
	return tmp;
}

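/*
 * Example (illustrative only): z/VM userids are blank-padded 8-character
 * fields, so for an input of "LNXGST1 " the function returns its static
 * buffer holding "LNXGST1". The guest name is made up for this example.
 */
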
/*
 * States of the interface statemachine.
 *
 * MUST be always the last element!!
 */
static const char *dev_state_names[] = {
	"Stopped",
	"StartWait",
	"StopWait",
	"Running",
};

/*
 * Events of the interface statemachine.
 *
 * MUST be always the last element!!
 */
static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"Connection up",
	"Connection down",
};

/*
 * Events of the connection statemachine
 *
 * Events, representing callbacks from
 * lowlevel iucv layer
 *
 * Events, representing errors return codes from
 * calls to lowlevel iucv layer
 *
 * Event, representing timer expiry.
 *
 * Events, representing commands from upper levels.
 *
 * MUST be always the last element!!
 */
static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
};

/*
 * States of the connection statemachine.
 */
enum conn_states {
	/* Connection not assigned to any device, initial state, invalid */
	CONN_STATE_INVALID,

	/* Userid assigned but not operating */
	CONN_STATE_STOPPED,

	/*
	 * Connection registered,
	 * no connection request sent yet,
	 * no connection request received
	 */
	CONN_STATE_STARTWAIT,

	/*
	 * Connection registered and connection request sent,
	 * no acknowledge and no connection request received yet.
	 */
	CONN_STATE_SETUPWAIT,

	/* Connection up and running idle */
	CONN_STATE_IDLE,

	/* Data sent, awaiting CONN_EVENT_TXDONE */
	CONN_STATE_TX,

	/* Error during registration. */
	CONN_STATE_REGERR,

	/* Error during registration. */
	CONN_STATE_CONNERR,

	/* MUST be always the last element!! */
	NR_CONN_STATES
};

static const char *conn_state_names[] = {
	"Registration error",
};

/*
 * Debug Facility Stuff
 */
static debug_info_t *iucv_dbf_setup = NULL;
static debug_info_t *iucv_dbf_data = NULL;
static debug_info_t *iucv_dbf_trace = NULL;

DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);

static void iucv_unregister_dbf_views(void)
{
	debug_unregister(iucv_dbf_setup);
	debug_unregister(iucv_dbf_data);
	debug_unregister(iucv_dbf_trace);
}

static int iucv_register_dbf_views(void)
{
	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
					IUCV_DBF_SETUP_PAGES,
					IUCV_DBF_SETUP_NR_AREAS,
					IUCV_DBF_SETUP_LEN);
	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
				       IUCV_DBF_DATA_PAGES,
				       IUCV_DBF_DATA_NR_AREAS,
				       IUCV_DBF_DATA_LEN);
	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
					IUCV_DBF_TRACE_PAGES,
					IUCV_DBF_TRACE_NR_AREAS,
					IUCV_DBF_TRACE_LEN);

	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
	    (iucv_dbf_trace == NULL)) {
		iucv_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);

	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);

	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);

	return 0;
}

/*
 * Callback-wrappers, called from lowlevel iucv layer.
 */

static void netiucv_callback_rx(struct iucv_path *path,
				struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
}

static void netiucv_callback_txdone(struct iucv_path *path,
				    struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
}

static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}

static int netiucv_callback_connreq(struct iucv_path *path,
				    u8 ipvmid[8], u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;
	int rc;

	if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
		/* ipuser must match iucvMagic. */
		return -EINVAL;

	rc = -EINVAL;
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(conn, &iucv_connection_list, list) {
		if (strncmp(ipvmid, conn->userid, 8))
			continue;
		/* Found a matching connection for this path. */
		conn->path = path;
		ev.conn = conn;
		ev.data = path;
		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
		rc = 0;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	return rc;
}

static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}

static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}

static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}

/*
 * NOP action for statemachines
 */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}

/*
 * Actions of the connection statemachine
 */

/*
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * Helper function for conn_action_rx.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device   *dev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->ip_summed = CHECKSUM_NONE;
	pskb->protocol = ntohs(ETH_P_IP);

	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		if (!header->next)
			break;

		skb_pull(pskb, NETIUCV_HDRLEN);
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				       header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			IUCV_DBF_TEXT(data, 2,
				      "Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in
		 * iucv.c), we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		dev->last_rx = jiffies;
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}

static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}

static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	u32 single_flag = msg->tag;
	u32 txbytes = 0;
	u32 txpackets = 0;
	u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (conn && conn->netdev)
		privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if (single_flag) {
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			atomic_dec(&skb->users);
			dev_kfree_skb_any(skb);
			if (privptr) {
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
						  - NETIUCV_HDRLEN);
			}
		}
	}
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len == 0) {
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}

	header.next = 0;
	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = current_kernel_time();
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		if (privptr)
			privptr->stats.tx_errors += txpackets;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		if (privptr) {
			privptr->stats.tx_packets += txpackets;
			privptr->stats.tx_bytes += txbytes;
		}
	}
	if (stat_maxcq > conn->prof.maxcqueue)
		conn->prof.maxcqueue = stat_maxcq;
}

static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}

static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_path *path = ev->data;

	IUCV_DBF_TEXT(trace, 3, __func__);
	iucv_path_sever(path, NULL);
}

static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}

static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}

static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
	dev_info(privptr->dev, "The peer interface of the IUCV device"
		 " has closed the connection\n");
	IUCV_DBF_TEXT(data, 2,
		      "conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}

static void conn_action_start(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
		       netdev->name, conn->userid);

	/*
	 * We must set the state before calling iucv_connect because the
	 * callback handler could be called at any point after the connection
	 * request is sent
	 */

	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
			       NULL, iucvMagic, conn);

	/* On success, adjust the queue length and arm the setup timer. */
	netdev->tx_queue_len = conn->path->msglim;
	fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
		     CONN_EVENT_TIMER, conn);

	/* Error handling for the different iucv_path_connect return codes: */
	dev_warn(privptr->dev,
		 "The IUCV device failed to connect to z/VM guest %s\n",
		 netiucv_printname(conn->userid));
	fsm_newstate(fi, CONN_STATE_STARTWAIT);

	dev_warn(privptr->dev,
		 "The IUCV device failed to connect to the peer on z/VM"
		 " guest %s\n", netiucv_printname(conn->userid));
	fsm_newstate(fi, CONN_STATE_STARTWAIT);

	dev_err(privptr->dev,
		"Connecting the IUCV device would exceed the maximum"
		" number of IUCV connections\n");
	fsm_newstate(fi, CONN_STATE_CONNERR);

	dev_err(privptr->dev,
		"z/VM guest %s has too many IUCV connections"
		" to connect with the IUCV device\n",
		netiucv_printname(conn->userid));
	fsm_newstate(fi, CONN_STATE_CONNERR);

	dev_err(privptr->dev,
		"The IUCV device cannot connect to a z/VM guest with no"
		" IUCV authorization\n");
	fsm_newstate(fi, CONN_STATE_CONNERR);

	dev_err(privptr->dev,
		"Connecting the IUCV device failed with error %d\n",
		rc);
	fsm_newstate(fi, CONN_STATE_CONNERR);

	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
}

static void netiucv_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}

static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
	iucv_path_sever(conn->path, iucvMagic);
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}

static void conn_action_inval(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;

	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
		       netdev->name, conn->userid);
}

static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};

static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);

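/*
 * Note on the sizing above: sizeof(conn_fsm) / sizeof(fsm_node) is the
 * classic element-count idiom; ARRAY_SIZE(conn_fsm) would be the
 * equivalent spelling using the kernel helper macro.
 */
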
/*
 * Actions for interface - statemachine.
 */

/*
 * @fi: An instance of an interface statemachine.
 * @event: The event, just happened.
 * @arg: Generic pointer, casted from struct net_device * upon call.
 *
 * Startup connection by sending CONN_EVENT_START to it.
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, DEV_STATE_STARTWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}

/*
 * Shutdown connection by sending CONN_EVENT_STOP to it.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from struct net_device * upon call.
 */
static void
dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);
	struct iucv_event ev;

	IUCV_DBF_TEXT(trace, 3, __func__);

	ev.conn = privptr->conn;

	fsm_newstate(fi, DEV_STATE_STOPWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
}

/*
 * Called from connection statemachine
 * when a connection is up and running.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from struct net_device * upon call.
 */
static void
dev_action_connup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT:
		fsm_newstate(fi, DEV_STATE_RUNNING);
		dev_info(privptr->dev,
			 "The IUCV device has been connected"
			 " successfully to %s\n", privptr->conn->userid);
		IUCV_DBF_TEXT(setup, 3,
			      "connection is up and running\n");
		break;
	case DEV_STATE_STOPWAIT:
		IUCV_DBF_TEXT(data, 2,
			      "dev_action_connup: in DEV_STATE_STOPWAIT\n");
		break;
	}
}

/*
 * Called from connection statemachine
 * when a connection has been shutdown.
 *
 * @param fi    An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg   Generic pointer, casted from struct net_device * upon call.
 */
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		fsm_newstate(fi, DEV_STATE_STARTWAIT);
		break;
	case DEV_STATE_STOPWAIT:
		fsm_newstate(fi, DEV_STATE_STOPPED);
		IUCV_DBF_TEXT(setup, 3, "connection is down\n");
		break;
	}
}

static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,   DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,  DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,  DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT, DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT, DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,   DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONDOWN, dev_action_conndown },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONUP,   netiucv_action_nop  },
};

static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);

/*
 * Transmit a packet.
 * This is a helper function for netiucv_tx().
 *
 * @param conn Connection to be used for sending.
 * @param skb Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by netiucv_tx().
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_transmit_skb(struct iucv_connection *conn,
				struct sk_buff *skb)
{
	struct iucv_message msg;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
		int l = skb->len + NETIUCV_HDRLEN;

		spin_lock_irqsave(&conn->collect_lock, saveflags);
		if (conn->collect_len + l >
		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
			rc = -EBUSY;
			IUCV_DBF_TEXT(data, 2,
				      "EBUSY from netiucv_transmit_skb\n");
		} else {
			atomic_inc(&skb->users);
			skb_queue_tail(&conn->collect_queue, skb);
			conn->collect_len += l;
			rc = 0;
		}
		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	} else {
		struct sk_buff *nskb = skb;
		/*
		 * Copy the skb to a new allocated skb in lowmem only if the
		 * data is located above 2G in memory or tailroom is < 2.
		 */
		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
				    NETIUCV_HDRLEN)) >> 31;
		int copied = 0;

		if (hi || (skb_tailroom(skb) < 2)) {
			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
			if (!nskb) {
				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
				return -ENOMEM;
			}
			skb_reserve(nskb, NETIUCV_HDRLEN);
			memcpy(skb_put(nskb, skb->len),
			       skb->data, skb->len);
			copied = 1;
		}
		/*
		 * skb now is below 2G and has enough room. Add headers.
		 */
		header.next = nskb->len + NETIUCV_HDRLEN;
		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
		header.next = 0;
		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);

		fsm_newstate(conn->fsm, CONN_STATE_TX);
		conn->prof.send_stamp = current_kernel_time();

		rc = iucv_message_send(conn->path, &msg, 0, 0,
				       nskb->data, nskb->len);
		conn->prof.doios_single++;
		conn->prof.txlen += skb->len;
		conn->prof.tx_pending++;
		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
			conn->prof.tx_max_pending = conn->prof.tx_pending;
		if (rc) {
			struct netiucv_priv *privptr;
			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
			conn->prof.tx_pending--;
			privptr = netdev_priv(conn->netdev);
			privptr->stats.tx_errors++;
			if (copied)
				dev_kfree_skb(nskb);
			else {
				/*
				 * Remove our headers. They get added
				 * again on retransmit.
				 */
				skb_pull(skb, NETIUCV_HDRLEN);
				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
			}
			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
		} else {
			atomic_inc(&nskb->users);
			skb_queue_tail(&conn->commit_queue, nskb);
		}
	}

	return rc;
}

/*
 * Interface API for upper network layers
 */

/*
 * Open an interface.
 * Called from generic network layer when ifconfig up is run.
 *
 * @param dev Pointer to interface struct.
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_open(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	fsm_event(priv->fsm, DEV_EVENT_START, dev);
	return 0;
}

/*
 * Close an interface.
 * Called from generic network layer when ifconfig down is run.
 *
 * @param dev Pointer to interface struct.
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_close(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
	return 0;
}

static int netiucv_pm_prepare(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	return 0;
}

static void netiucv_pm_complete(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
}

/*
 * netiucv_pm_freeze() - Freeze PM callback
 * @dev: netiucv device
 *
 * close open netiucv interfaces
 */
static int netiucv_pm_freeze(struct device *dev)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = NULL;
	int rc = 0;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (priv && priv->conn)
		ndev = priv->conn->netdev;
	if (!ndev)
		goto out;
	netif_device_detach(ndev);
	priv->pm_state = fsm_getstate(priv->fsm);
	rc = netiucv_close(ndev);
out:
	return rc;
}

/*
 * netiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev: netiucv device
 *
 * re-open netiucv interfaces closed during freeze
 */
static int netiucv_pm_restore_thaw(struct device *dev)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = NULL;
	int rc = 0;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (priv && priv->conn)
		ndev = priv->conn->netdev;
	if (!ndev)
		goto out;
	switch (priv->pm_state) {
	case DEV_STATE_RUNNING:
	case DEV_STATE_STARTWAIT:
		rc = netiucv_open(ndev);
		break;
	default:
		break;
	}
	netif_device_attach(ndev);
out:
	return rc;
}

/*
 * Start transmission of a packet.
 * Called from generic network device layer.
 *
 * @param skb Pointer to buffer containing the packet.
 * @param dev Pointer to interface struct.
 *
 * @return 0 if packet consumed, !0 if packet rejected.
 *         Note: If we return !0, then the packet is free'd by
 *               the generic network layer.
 */
static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);
	/*
	 * Some sanity checks ...
	 */
	if (skb == NULL) {
		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
		privptr->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
		IUCV_DBF_TEXT(data, 2,
			      "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/*
	 * If connection is not running, try to restart it
	 * and throw away packet.
	 */
	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
		dev_kfree_skb(skb);
		privptr->stats.tx_dropped++;
		privptr->stats.tx_errors++;
		privptr->stats.tx_carrier_errors++;
		return NETDEV_TX_OK;
	}

	if (netiucv_test_and_set_busy(dev)) {
		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
		return NETDEV_TX_BUSY;
	}
	dev->trans_start = jiffies;
	rc = netiucv_transmit_skb(privptr->conn, skb);
	netiucv_clear_busy(dev);
	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * @dev: Pointer to interface struct.
 *
 * Returns interface statistics of a device.
 *
 * Returns pointer to stats struct of this interface.
 */
static struct net_device_stats *netiucv_stats (struct net_device * dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return &priv->stats;
}

/*
 * netiucv_change_mtu
 * @dev: Pointer to interface struct.
 * @new_mtu: The new MTU to use for this interface.
 *
 * Sets MTU of an interface.
 *
 * Returns 0 on success, -EINVAL if MTU is out of valid range.
 *         (valid range is 576 .. NETIUCV_MTU_MAX).
 */
static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}

/*
 * attributes in sysfs
 */

static ssize_t user_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
}

static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char *p;
	char *tmp;
	char username[9];
	int i;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count > 9) {
		IUCV_DBF_TEXT_(setup, 2,
			       "%d is length of username\n", (int) count);
		return -EINVAL;
	}

	tmp = strsep((char **) &buf, "\n");
	for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || (*p == '$')) {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n') {
			/* trailing lf, grr */
			break;
		}
		IUCV_DBF_TEXT_(setup, 2,
			       "username: invalid character %c\n", *p);
		return -EINVAL;
	}
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active. */
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
		return -EPERM;
	}
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
				       "to %s already exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	return count;
}

static DEVICE_ATTR(user, 0644, user_show, user_write);

static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}

static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char *e;
	int bs1;

	IUCV_DBF_TEXT(trace, 3, __func__);

	bs1 = simple_strtoul(buf, &e, 0);

	if (e && (!isspace(*e))) {
		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
		return -EINVAL;
	}
	if (bs1 > NETIUCV_BUFSIZE_MAX) {
		IUCV_DBF_TEXT_(setup, 2,
			       "buffer_write: buffer size %d too large\n",
			       bs1);
		return -EINVAL;
	}
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
		IUCV_DBF_TEXT_(setup, 2,
			       "buffer_write: buffer size %d too small\n",
			       bs1);
		return -EINVAL;
	}
	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
		IUCV_DBF_TEXT_(setup, 2,
			       "buffer_write: buffer size %d too small\n",
			       bs1);
		return -EINVAL;
	}

	priv->conn->max_buffsize = bs1;
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;

	return count;
}

static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);

static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}

static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);

static ssize_t conn_fsm_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}

static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);

static ssize_t maxmulti_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}

static ssize_t maxmulti_write (struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxmulti = 0;
	return count;
}

static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);

static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}

static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxcqueue = 0;
	return count;
}

static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);

static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}

static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.doios_single = 0;
	return count;
}

static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);

static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}

static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	priv->conn->prof.doios_multi = 0;
	return count;
}

static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);

static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}

static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.txlen = 0;
	return count;
}

static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);

static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}

static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_time = 0;
	return count;
}

static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);

static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}

static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);

static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}

static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_max_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);

static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name  = "stats",
	.attrs = netiucv_stat_attrs,
};

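/*
 * Usage sketch for the statistics attributes above (device name and
 * exact sysfs path are examples only; the real location depends on how
 * the device is rooted on the iucv bus): reading an attribute shows the
 * counter, writing anything to it resets the counter to 0, e.g.
 *
 *	cat      .../netiucv0/stats/tx_max_pending
 *	echo 0 > .../netiucv0/stats/tx_max_pending
 */
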
static int netiucv_add_files(struct device *dev)
{
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);
	ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
	if (ret)
		return ret;
	ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
	if (ret)
		sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
	return ret;
}

static void netiucv_remove_files(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
	sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
}

static int netiucv_register_device(struct net_device *ndev)
{
	struct netiucv_priv *priv = netdev_priv(ndev);
	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);

	dev_set_name(dev, "net%s", ndev->name);
	dev->bus = &iucv_bus;
	dev->parent = iucv_root;
	/*
	 * The release function could be called after the
	 * module has been unloaded. Its _only_ task is to
	 * free the struct. Therefore, we specify kfree()
	 * directly here. (Probably a little bit obfuscating
	 * but legitimate ...).
	 */
	dev->release = (void (*)(struct device *))kfree;
	dev->driver = &netiucv_driver;

	ret = device_register(dev);
	if (ret)
		return ret;
	ret = netiucv_add_files(dev);
	if (ret)
		goto out_unreg;
	dev_set_drvdata(dev, priv);
	return 0;

out_unreg:
	device_unregister(dev);
	return ret;
}

static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_remove_files(dev);
	device_unregister(dev);
}

/*
 * Allocate and initialize a new connection structure.
 * Add it to the list of netiucv connections;
 */
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
						      char *username)
{
	struct iucv_connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	skb_queue_head_init(&conn->collect_queue);
	skb_queue_head_init(&conn->commit_queue);
	spin_lock_init(&conn->collect_lock);
	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;

	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->rx_buff)
		goto out_conn;
	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->tx_buff)
		goto out_rx;
	conn->fsm = init_fsm("netiucvconn", conn_state_names,
			     conn_event_names, NR_CONN_STATES,
			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
			     GFP_KERNEL);
	if (!conn->fsm)
		goto out_tx;

	fsm_settimer(conn->fsm, &conn->timer);
	fsm_newstate(conn->fsm, CONN_STATE_INVALID);

	memcpy(conn->userid, username, 9);
	fsm_newstate(conn->fsm, CONN_STATE_STOPPED);

	write_lock_bh(&iucv_connection_rwlock);
	list_add_tail(&conn->list, &iucv_connection_list);
	write_unlock_bh(&iucv_connection_rwlock);
	return conn;

out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
	return NULL;
}

/*
 * Release a connection structure and remove it from the
 * list of netiucv connections.
 */
static void netiucv_remove_connection(struct iucv_connection *conn)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	write_lock_bh(&iucv_connection_rwlock);
	list_del_init(&conn->list);
	write_unlock_bh(&iucv_connection_rwlock);
	fsm_deltimer(&conn->timer);
	netiucv_purge_skb_queue(&conn->collect_queue);
	iucv_path_sever(conn->path, iucvMagic);
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}

/*
 * Release everything of a net device.
 */
static void netiucv_free_netdevice(struct net_device *dev)
{
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	netiucv_remove_connection(privptr->conn);
	kfree_fsm(privptr->fsm);
	privptr->conn = NULL; privptr->fsm = NULL;
	/* privptr gets freed by free_netdev() */
}

/*
 * Initialize a net device. (Called from kernel in alloc_netdev())
 */
static const struct net_device_ops netiucv_netdev_ops = {
	.ndo_open		= netiucv_open,
	.ndo_stop		= netiucv_close,
	.ndo_get_stats		= netiucv_stats,
	.ndo_start_xmit		= netiucv_tx,
	.ndo_change_mtu		= netiucv_change_mtu,
};

static void netiucv_setup_netdevice(struct net_device *dev)
{
	dev->mtu	     = NETIUCV_MTU_DEFAULT;
	dev->destructor      = netiucv_free_netdevice;
	dev->hard_header_len = NETIUCV_HDRLEN;
	dev->type            = ARPHRD_SLIP;
	dev->tx_queue_len    = NETIUCV_QUEUELEN_DEFAULT;
	dev->flags	     = IFF_POINTOPOINT | IFF_NOARP;
	dev->netdev_ops	     = &netiucv_netdev_ops;
}

/*
 * Allocate and initialize everything of a net device.
 */
static struct net_device *netiucv_init_netdevice(char *username)
{
	struct netiucv_priv *privptr;
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
			   netiucv_setup_netdevice);
	if (!dev)
		return NULL;
	if (dev_alloc_name(dev, dev->name) < 0)
		goto out_netdev;

	privptr = netdev_priv(dev);
	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (!privptr->fsm)
		goto out_netdev;

	privptr->conn = netiucv_new_connection(dev, username);
	if (!privptr->conn) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
		goto out_fsm;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	return dev;

out_fsm:
	kfree_fsm(privptr->fsm);
out_netdev:
	free_netdev(dev);
	return NULL;
}

static ssize_t conn_write(struct device_driver *drv,
			  const char *buf, size_t count)
{
	const char *p;
	char username[9];
	int i, rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count > 9) {
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	for (i = 0, p = buf; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n')
			/* trailing lf, grr */
			break;
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %c\n", *p);
		return -EINVAL;
	}
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
				       "to %s already exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	dev = netiucv_init_netdevice(username);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2,
			       "ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdev(dev);
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been"
		 " established successfully\n", netiucv_printname(username));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	netiucv_free_netdevice(dev);
	return rc;
}

static DRIVER_ATTR(connection, 0200, NULL, conn_write);

static ssize_t remove_write (struct device_driver *drv,
			     const char *buf, size_t count)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;
	char name[IFNAMSIZ];
	const char *p;
	int i;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (count >= IFNAMSIZ)
		count = IFNAMSIZ - 1;

	for (i = 0, p = buf; i < count && *p; i++, p++) {
		if (*p == '\n' || *p == ' ')
			/* trailing lf, grr */
			break;
		name[i] = *p;
	}
	name[i] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;
		if (strncmp(name, ndev->name, count))
			continue;
		read_unlock_bh(&iucv_connection_rwlock);
		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
			dev_warn(dev, "The IUCV device is connected"
				 " to %s and cannot be removed\n",
				 priv->conn->userid);
			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
			return -EPERM;
		}
		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
		return count;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
	return -EINVAL;
}

static DRIVER_ATTR(remove, 0200, NULL, remove_write);

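/*
 * Illustration of driving the two driver attributes defined above from
 * user space (the peer guest name "VMLINUX1" and the interface name are
 * examples only; the paths assume the usual /sys/bus/iucv/drivers/netiucv
 * layout):
 *
 *	echo VMLINUX1 > /sys/bus/iucv/drivers/netiucv/connection
 *	echo iucv0    > /sys/bus/iucv/drivers/netiucv/remove
 */
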
static struct attribute * netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

static struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};

static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}

static void __exit netiucv_exit(void)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;

	IUCV_DBF_TEXT(trace, 3, __func__);
	while (!list_empty(&iucv_connection_list)) {
		cp = list_entry(iucv_connection_list.next,
				struct iucv_connection, list);
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;

		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
	}

	device_unregister(netiucv_dev);
	driver_unregister(&netiucv_driver);
	iucv_unregister(&netiucv_handler, 1);
	iucv_unregister_dbf_views();

	pr_info("driver unloaded\n");
}

static int __init netiucv_init(void)
{
	int rc;

	rc = iucv_register_dbf_views();
	if (rc)
		goto out;
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_driver.groups = netiucv_drv_attr_groups;
	rc = driver_register(&netiucv_driver);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
		goto out_iucv;
	}
	/* establish dummy device */
	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!netiucv_dev) {
		rc = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(netiucv_dev, "netiucv");
	netiucv_dev->bus = &iucv_bus;
	netiucv_dev->parent = iucv_root;
	netiucv_dev->release = (void (*)(struct device *))kfree;
	netiucv_dev->driver = &netiucv_driver;
	rc = device_register(netiucv_dev);
	if (rc)
		goto out_driver;
	netiucv_banner();
	return rc;

out_driver:
	driver_unregister(&netiucv_driver);
out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	iucv_unregister_dbf_views();
out:
	return rc;
}

module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");