s390: remove driver_data direct access of struct device
drivers/s390/net/netiucv.c
1 /*
2 * IUCV network driver
4 * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
7 * Sysfs integration and all bugs therein by Cornelia Huck
8 * (cornelia.huck@de.ibm.com)
10 * Documentation used:
11 * the source of the original IUCV driver by:
12 * Stefan Hegewald <hegewald@de.ibm.com>
13 * Hartmut Penner <hpenner@de.ibm.com>
14 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
15 * Martin Schwidefsky (schwidefsky@de.ibm.com)
16 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
34 #define KMSG_COMPONENT "netiucv"
35 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
37 #undef DEBUG
39 #include <linux/module.h>
40 #include <linux/init.h>
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 #include <linux/errno.h>
44 #include <linux/types.h>
45 #include <linux/interrupt.h>
46 #include <linux/timer.h>
47 #include <linux/bitops.h>
49 #include <linux/signal.h>
50 #include <linux/string.h>
51 #include <linux/device.h>
53 #include <linux/ip.h>
54 #include <linux/if_arp.h>
55 #include <linux/tcp.h>
56 #include <linux/skbuff.h>
57 #include <linux/ctype.h>
58 #include <net/dst.h>
60 #include <asm/io.h>
61 #include <asm/uaccess.h>
63 #include <net/iucv/iucv.h>
64 #include "fsm.h"
66 MODULE_AUTHOR
67 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
68 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
70 /**
71 * Debug Facility stuff
73 #define IUCV_DBF_SETUP_NAME "iucv_setup"
74 #define IUCV_DBF_SETUP_LEN 32
75 #define IUCV_DBF_SETUP_PAGES 2
76 #define IUCV_DBF_SETUP_NR_AREAS 1
77 #define IUCV_DBF_SETUP_LEVEL 3
79 #define IUCV_DBF_DATA_NAME "iucv_data"
80 #define IUCV_DBF_DATA_LEN 128
81 #define IUCV_DBF_DATA_PAGES 2
82 #define IUCV_DBF_DATA_NR_AREAS 1
83 #define IUCV_DBF_DATA_LEVEL 2
85 #define IUCV_DBF_TRACE_NAME "iucv_trace"
86 #define IUCV_DBF_TRACE_LEN 16
87 #define IUCV_DBF_TRACE_PAGES 4
88 #define IUCV_DBF_TRACE_NR_AREAS 1
89 #define IUCV_DBF_TRACE_LEVEL 3
91 #define IUCV_DBF_TEXT(name,level,text) \
92 do { \
93 debug_text_event(iucv_dbf_##name,level,text); \
94 } while (0)
96 #define IUCV_DBF_HEX(name,level,addr,len) \
97 do { \
98 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
99 } while (0)
101 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
103 /* Filter out low debug levels early to avoid wasted sprintf calls */
104 static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
106 return (level <= dbf_grp->level);
109 #define IUCV_DBF_TEXT_(name, level, text...) \
110 do { \
111 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
112 char* iucv_dbf_txt_buf = \
113 get_cpu_var(iucv_dbf_txt_buf); \
114 sprintf(iucv_dbf_txt_buf, text); \
115 debug_text_event(iucv_dbf_##name, level, \
116 iucv_dbf_txt_buf); \
117 put_cpu_var(iucv_dbf_txt_buf); \
119 } while (0)
121 #define IUCV_DBF_SPRINTF(name,level,text...) \
122 do { \
123 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
124 debug_sprintf_event(iucv_dbf_trace, level, text ); \
125 } while (0)
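/*
 * Editor's note (illustrative usage sketch, not part of the original file):
 * the DBF macros above are used throughout this driver roughly as follows.
 * IUCV_DBF_TEXT() records a fixed string, IUCV_DBF_TEXT_() formats into the
 * per-CPU buffer first, and IUCV_DBF_HEX() dumps a raw byte range.
 */
#if 0	/* sketch only, kept out of the build */
	IUCV_DBF_TEXT(trace, 3, __func__);
	IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
	IUCV_DBF_HEX(data, 2, skb->data, skb->len);
#endif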
128 * some more debug stuff
130 #define IUCV_HEXDUMP16(importance,header,ptr) \
131 PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
132 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
133 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
134 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
135 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
136 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
137 *(((char*)ptr)+12),*(((char*)ptr)+13), \
138 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
139 PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
140 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
141 *(((char*)ptr)+16),*(((char*)ptr)+17), \
142 *(((char*)ptr)+18),*(((char*)ptr)+19), \
143 *(((char*)ptr)+20),*(((char*)ptr)+21), \
144 *(((char*)ptr)+22),*(((char*)ptr)+23), \
145 *(((char*)ptr)+24),*(((char*)ptr)+25), \
146 *(((char*)ptr)+26),*(((char*)ptr)+27), \
147 *(((char*)ptr)+28),*(((char*)ptr)+29), \
148 *(((char*)ptr)+30),*(((char*)ptr)+31));
150 #define PRINTK_HEADER " iucv: " /* for debugging */
152 static struct device_driver netiucv_driver = {
153 .owner = THIS_MODULE,
154 .name = "netiucv",
155 .bus = &iucv_bus,
158 static int netiucv_callback_connreq(struct iucv_path *,
159 u8 ipvmid[8], u8 ipuser[16]);
160 static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
161 static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
162 static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
163 static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
164 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
165 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
167 static struct iucv_handler netiucv_handler = {
168 .path_pending = netiucv_callback_connreq,
169 .path_complete = netiucv_callback_connack,
170 .path_severed = netiucv_callback_connrej,
171 .path_quiesced = netiucv_callback_connsusp,
172 .path_resumed = netiucv_callback_connres,
173 .message_pending = netiucv_callback_rx,
174 .message_complete = netiucv_callback_txdone
178 * Per connection profiling data
180 struct connection_profile {
181 unsigned long maxmulti;
182 unsigned long maxcqueue;
183 unsigned long doios_single;
184 unsigned long doios_multi;
185 unsigned long txlen;
186 unsigned long tx_time;
187 struct timespec send_stamp;
188 unsigned long tx_pending;
189 unsigned long tx_max_pending;
193 * Representation of one iucv connection
195 struct iucv_connection {
196 struct list_head list;
197 struct iucv_path *path;
198 struct sk_buff *rx_buff;
199 struct sk_buff *tx_buff;
200 struct sk_buff_head collect_queue;
201 struct sk_buff_head commit_queue;
202 spinlock_t collect_lock;
203 int collect_len;
204 int max_buffsize;
205 fsm_timer timer;
206 fsm_instance *fsm;
207 struct net_device *netdev;
208 struct connection_profile prof;
209 char userid[9];
213 * Linked list of all connection structs.
215 static LIST_HEAD(iucv_connection_list);
216 static DEFINE_RWLOCK(iucv_connection_rwlock);
219 * Representation of event-data for the
220 * connection state machine.
222 struct iucv_event {
223 struct iucv_connection *conn;
224 void *data;
228 * Private part of the network device structure
230 struct netiucv_priv {
231 struct net_device_stats stats;
232 unsigned long tbusy;
233 fsm_instance *fsm;
234 struct iucv_connection *conn;
235 struct device *dev;
239 * Link level header for a packet.
241 struct ll_header {
242 u16 next;
245 #define NETIUCV_HDRLEN (sizeof(struct ll_header))
246 #define NETIUCV_BUFSIZE_MAX 32768
247 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
248 #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
249 #define NETIUCV_MTU_DEFAULT 9216
250 #define NETIUCV_QUEUELEN_DEFAULT 50
251 #define NETIUCV_TIMEOUT_5SEC 5000
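/*
 * Editor's note (worked example derived from the definitions above):
 * struct ll_header holds a single u16, so NETIUCV_HDRLEN is 2 and
 * NETIUCV_MTU_MAX comes out as 32768 - 2 = 32766.  Each packet inside an
 * IUCV buffer is preceded by such a header whose 'next' field is the offset
 * (from the start of the buffer) of the following header; a header with
 * next == 0 terminates the buffer (see netiucv_unpack_skb() and
 * conn_action_txdone() below).
 */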
254 * Compatibility macros for busy handling
255 * of network devices.
257 static inline void netiucv_clear_busy(struct net_device *dev)
259 struct netiucv_priv *priv = netdev_priv(dev);
260 clear_bit(0, &priv->tbusy);
261 netif_wake_queue(dev);
264 static inline int netiucv_test_and_set_busy(struct net_device *dev)
266 struct netiucv_priv *priv = netdev_priv(dev);
267 netif_stop_queue(dev);
268 return test_and_set_bit(0, &priv->tbusy);
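/*
 * Editor's note (usage sketch mirroring netiucv_tx() further down): the
 * transmit path brackets every send with these helpers so the queue stays
 * stopped while a buffer is handed to IUCV.
 */
#if 0	/* sketch only */
	if (netiucv_test_and_set_busy(dev))
		return NETDEV_TX_BUSY;
	rc = netiucv_transmit_skb(privptr->conn, skb);
	netiucv_clear_busy(dev);
#endif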
271 static u8 iucvMagic[16] = {
272 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
273 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
277 * Convert an iucv userId to its printable
278 * form (strip whitespace at end).
280 * @param An iucv userId
282 * @returns The printable string (static data!!)
284 static char *netiucv_printname(char *name)
286 static char tmp[9];
287 char *p = tmp;
288 memcpy(tmp, name, 8);
289 tmp[8] = '\0';
290 while (*p && (!isspace(*p)))
291 p++;
292 *p = '\0';
293 return tmp;
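/*
 * Editor's note (illustrative only): the returned pointer refers to a static
 * buffer, so the result must be consumed before the next call and the helper
 * is not safe for concurrent callers.  Typical use elsewhere in this file:
 */
#if 0	/* sketch only */
	dev_warn(privptr->dev,
		 "The IUCV device failed to connect to z/VM guest %s\n",
		 netiucv_printname(conn->userid));
#endif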
297 * States of the interface statemachine.
299 enum dev_states {
300 DEV_STATE_STOPPED,
301 DEV_STATE_STARTWAIT,
302 DEV_STATE_STOPWAIT,
303 DEV_STATE_RUNNING,
305 * MUST be always the last element!!
307 NR_DEV_STATES
310 static const char *dev_state_names[] = {
311 "Stopped",
312 "StartWait",
313 "StopWait",
314 "Running",
318 * Events of the interface statemachine.
320 enum dev_events {
321 DEV_EVENT_START,
322 DEV_EVENT_STOP,
323 DEV_EVENT_CONUP,
324 DEV_EVENT_CONDOWN,
326 * MUST be always the last element!!
328 NR_DEV_EVENTS
331 static const char *dev_event_names[] = {
332 "Start",
333 "Stop",
334 "Connection up",
335 "Connection down",
339 * Events of the connection statemachine
341 enum conn_events {
343 * Events, representing callbacks from
344 * lowlevel iucv layer)
346 CONN_EVENT_CONN_REQ,
347 CONN_EVENT_CONN_ACK,
348 CONN_EVENT_CONN_REJ,
349 CONN_EVENT_CONN_SUS,
350 CONN_EVENT_CONN_RES,
351 CONN_EVENT_RX,
352 CONN_EVENT_TXDONE,
355 * Events, representing errors return codes from
356 * calls to lowlevel iucv layer
360 * Event, representing timer expiry.
362 CONN_EVENT_TIMER,
365 * Events, representing commands from upper levels.
367 CONN_EVENT_START,
368 CONN_EVENT_STOP,
371 * MUST be always the last element!!
373 NR_CONN_EVENTS,
376 static const char *conn_event_names[] = {
377 "Remote connection request",
378 "Remote connection acknowledge",
379 "Remote connection reject",
380 "Connection suspended",
381 "Connection resumed",
382 "Data received",
383 "Data sent",
385 "Timer",
387 "Start",
388 "Stop",
392 * States of the connection statemachine.
394 enum conn_states {
396 * Connection not assigned to any device,
397 * initial state, invalid
399 CONN_STATE_INVALID,
402 * Userid assigned but not operating
404 CONN_STATE_STOPPED,
407 * Connection registered,
408 * no connection request sent yet,
409 * no connection request received
411 CONN_STATE_STARTWAIT,
414 * Connection registered and connection request sent,
415 * no acknowledge and no connection request received yet.
417 CONN_STATE_SETUPWAIT,
420 * Connection up and running idle
422 CONN_STATE_IDLE,
425 * Data sent, awaiting CONN_EVENT_TXDONE
427 CONN_STATE_TX,
430 * Error during registration.
432 CONN_STATE_REGERR,
435 * Error during connection setup.
437 CONN_STATE_CONNERR,
440 * MUST be always the last element!!
442 NR_CONN_STATES,
445 static const char *conn_state_names[] = {
446 "Invalid",
447 "Stopped",
448 "StartWait",
449 "SetupWait",
450 "Idle",
451 "TX",
452 "Terminating",
453 "Registration error",
454 "Connect error",
459 * Debug Facility Stuff
461 static debug_info_t *iucv_dbf_setup = NULL;
462 static debug_info_t *iucv_dbf_data = NULL;
463 static debug_info_t *iucv_dbf_trace = NULL;
465 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
467 static void iucv_unregister_dbf_views(void)
469 if (iucv_dbf_setup)
470 debug_unregister(iucv_dbf_setup);
471 if (iucv_dbf_data)
472 debug_unregister(iucv_dbf_data);
473 if (iucv_dbf_trace)
474 debug_unregister(iucv_dbf_trace);
476 static int iucv_register_dbf_views(void)
478 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
479 IUCV_DBF_SETUP_PAGES,
480 IUCV_DBF_SETUP_NR_AREAS,
481 IUCV_DBF_SETUP_LEN);
482 iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
483 IUCV_DBF_DATA_PAGES,
484 IUCV_DBF_DATA_NR_AREAS,
485 IUCV_DBF_DATA_LEN);
486 iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
487 IUCV_DBF_TRACE_PAGES,
488 IUCV_DBF_TRACE_NR_AREAS,
489 IUCV_DBF_TRACE_LEN);
491 if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
492 (iucv_dbf_trace == NULL)) {
493 iucv_unregister_dbf_views();
494 return -ENOMEM;
496 debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
497 debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
499 debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
500 debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
502 debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
503 debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
505 return 0;
509 * Callback-wrappers, called from lowlevel iucv layer.
512 static void netiucv_callback_rx(struct iucv_path *path,
513 struct iucv_message *msg)
515 struct iucv_connection *conn = path->private;
516 struct iucv_event ev;
518 ev.conn = conn;
519 ev.data = msg;
520 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
523 static void netiucv_callback_txdone(struct iucv_path *path,
524 struct iucv_message *msg)
526 struct iucv_connection *conn = path->private;
527 struct iucv_event ev;
529 ev.conn = conn;
530 ev.data = msg;
531 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
534 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
536 struct iucv_connection *conn = path->private;
538 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
541 static int netiucv_callback_connreq(struct iucv_path *path,
542 u8 ipvmid[8], u8 ipuser[16])
544 struct iucv_connection *conn = path->private;
545 struct iucv_event ev;
546 int rc;
548 if (memcmp(iucvMagic, ipuser, sizeof(iucvMagic)))
549 /* ipuser must match iucvMagic. */
550 return -EINVAL;
551 rc = -EINVAL;
552 read_lock_bh(&iucv_connection_rwlock);
553 list_for_each_entry(conn, &iucv_connection_list, list) {
554 if (strncmp(ipvmid, conn->userid, 8))
555 continue;
556 /* Found a matching connection for this path. */
557 conn->path = path;
558 ev.conn = conn;
559 ev.data = path;
560 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
561 rc = 0;
563 read_unlock_bh(&iucv_connection_rwlock);
564 return rc;
567 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
569 struct iucv_connection *conn = path->private;
571 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
574 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
576 struct iucv_connection *conn = path->private;
578 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
581 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
583 struct iucv_connection *conn = path->private;
585 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
589 * NOP action for statemachines
591 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
596 * Actions of the connection statemachine
600 * netiucv_unpack_skb
601 * @conn: The connection where this skb has been received.
602 * @pskb: The received skb.
604 * Unpack a just received skb and hand it over to upper layers.
605 * Helper function for conn_action_rx.
607 static void netiucv_unpack_skb(struct iucv_connection *conn,
608 struct sk_buff *pskb)
610 struct net_device *dev = conn->netdev;
611 struct netiucv_priv *privptr = netdev_priv(dev);
612 u16 offset = 0;
614 skb_put(pskb, NETIUCV_HDRLEN);
615 pskb->dev = dev;
616 pskb->ip_summed = CHECKSUM_NONE;
617 pskb->protocol = ntohs(ETH_P_IP);
619 while (1) {
620 struct sk_buff *skb;
621 struct ll_header *header = (struct ll_header *) pskb->data;
623 if (!header->next)
624 break;
626 skb_pull(pskb, NETIUCV_HDRLEN);
627 header->next -= offset;
628 offset += header->next;
629 header->next -= NETIUCV_HDRLEN;
630 if (skb_tailroom(pskb) < header->next) {
631 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
632 header->next, skb_tailroom(pskb));
633 return;
635 skb_put(pskb, header->next);
636 skb_reset_mac_header(pskb);
637 skb = dev_alloc_skb(pskb->len);
638 if (!skb) {
639 IUCV_DBF_TEXT(data, 2,
640 "Out of memory in netiucv_unpack_skb\n");
641 privptr->stats.rx_dropped++;
642 return;
644 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
645 pskb->len);
646 skb_reset_mac_header(skb);
647 skb->dev = pskb->dev;
648 skb->protocol = pskb->protocol;
649 pskb->ip_summed = CHECKSUM_UNNECESSARY;
650 privptr->stats.rx_packets++;
651 privptr->stats.rx_bytes += skb->len;
653 * Since receiving is always initiated from a tasklet (in iucv.c),
654 * we must use netif_rx_ni() instead of netif_rx()
656 netif_rx_ni(skb);
657 dev->last_rx = jiffies;
658 skb_pull(pskb, header->next);
659 skb_put(pskb, NETIUCV_HDRLEN);
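/*
 * Editor's note (illustrative buffer layout; sizes invented for the example):
 * a received IUCV buffer carrying two packets of 100 and 60 bytes looks like
 *
 *   offset   0: ll_header { next = 102 }
 *   offset   2: 100 bytes of packet data
 *   offset 102: ll_header { next = 164 }
 *   offset 104: 60 bytes of packet data
 *   offset 164: ll_header { next = 0 }   <- terminator
 *
 * netiucv_unpack_skb() above walks this chain and hands each packet to the
 * stack as its own skb via netif_rx_ni().
 */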
663 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
665 struct iucv_event *ev = arg;
666 struct iucv_connection *conn = ev->conn;
667 struct iucv_message *msg = ev->data;
668 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
669 int rc;
671 IUCV_DBF_TEXT(trace, 4, __func__);
673 if (!conn->netdev) {
674 iucv_message_reject(conn->path, msg);
675 IUCV_DBF_TEXT(data, 2,
676 "Received data for unlinked connection\n");
677 return;
679 if (msg->length > conn->max_buffsize) {
680 iucv_message_reject(conn->path, msg);
681 privptr->stats.rx_dropped++;
682 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
683 msg->length, conn->max_buffsize);
684 return;
686 conn->rx_buff->data = conn->rx_buff->head;
687 skb_reset_tail_pointer(conn->rx_buff);
688 conn->rx_buff->len = 0;
689 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
690 msg->length, NULL);
691 if (rc || msg->length < 5) {
692 privptr->stats.rx_errors++;
693 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
694 return;
696 netiucv_unpack_skb(conn, conn->rx_buff);
699 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
701 struct iucv_event *ev = arg;
702 struct iucv_connection *conn = ev->conn;
703 struct iucv_message *msg = ev->data;
704 struct iucv_message txmsg;
705 struct netiucv_priv *privptr = NULL;
706 u32 single_flag = msg->tag;
707 u32 txbytes = 0;
708 u32 txpackets = 0;
709 u32 stat_maxcq = 0;
710 struct sk_buff *skb;
711 unsigned long saveflags;
712 struct ll_header header;
713 int rc;
715 IUCV_DBF_TEXT(trace, 4, __func__);
717 if (conn && conn->netdev)
718 privptr = netdev_priv(conn->netdev);
719 conn->prof.tx_pending--;
720 if (single_flag) {
721 if ((skb = skb_dequeue(&conn->commit_queue))) {
722 atomic_dec(&skb->users);
723 dev_kfree_skb_any(skb);
724 if (privptr) {
725 privptr->stats.tx_packets++;
726 privptr->stats.tx_bytes +=
727 (skb->len - NETIUCV_HDRLEN
728 - NETIUCV_HDRLEN);
732 conn->tx_buff->data = conn->tx_buff->head;
733 skb_reset_tail_pointer(conn->tx_buff);
734 conn->tx_buff->len = 0;
735 spin_lock_irqsave(&conn->collect_lock, saveflags);
736 while ((skb = skb_dequeue(&conn->collect_queue))) {
737 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
738 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
739 NETIUCV_HDRLEN);
740 skb_copy_from_linear_data(skb,
741 skb_put(conn->tx_buff, skb->len),
742 skb->len);
743 txbytes += skb->len;
744 txpackets++;
745 stat_maxcq++;
746 atomic_dec(&skb->users);
747 dev_kfree_skb_any(skb);
749 if (conn->collect_len > conn->prof.maxmulti)
750 conn->prof.maxmulti = conn->collect_len;
751 conn->collect_len = 0;
752 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
753 if (conn->tx_buff->len == 0) {
754 fsm_newstate(fi, CONN_STATE_IDLE);
755 return;
758 header.next = 0;
759 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
760 conn->prof.send_stamp = current_kernel_time();
761 txmsg.class = 0;
762 txmsg.tag = 0;
763 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
764 conn->tx_buff->data, conn->tx_buff->len);
765 conn->prof.doios_multi++;
766 conn->prof.txlen += conn->tx_buff->len;
767 conn->prof.tx_pending++;
768 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
769 conn->prof.tx_max_pending = conn->prof.tx_pending;
770 if (rc) {
771 conn->prof.tx_pending--;
772 fsm_newstate(fi, CONN_STATE_IDLE);
773 if (privptr)
774 privptr->stats.tx_errors += txpackets;
775 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
776 } else {
777 if (privptr) {
778 privptr->stats.tx_packets += txpackets;
779 privptr->stats.tx_bytes += txbytes;
781 if (stat_maxcq > conn->prof.maxcqueue)
782 conn->prof.maxcqueue = stat_maxcq;
786 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
788 struct iucv_event *ev = arg;
789 struct iucv_connection *conn = ev->conn;
790 struct iucv_path *path = ev->data;
791 struct net_device *netdev = conn->netdev;
792 struct netiucv_priv *privptr = netdev_priv(netdev);
793 int rc;
795 IUCV_DBF_TEXT(trace, 3, __func__);
797 conn->path = path;
798 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
799 path->flags = 0;
800 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
801 if (rc) {
802 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
803 return;
805 fsm_newstate(fi, CONN_STATE_IDLE);
806 netdev->tx_queue_len = conn->path->msglim;
807 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
810 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
812 struct iucv_event *ev = arg;
813 struct iucv_path *path = ev->data;
815 IUCV_DBF_TEXT(trace, 3, __func__);
816 iucv_path_sever(path, NULL);
819 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
821 struct iucv_connection *conn = arg;
822 struct net_device *netdev = conn->netdev;
823 struct netiucv_priv *privptr = netdev_priv(netdev);
825 IUCV_DBF_TEXT(trace, 3, __func__);
826 fsm_deltimer(&conn->timer);
827 fsm_newstate(fi, CONN_STATE_IDLE);
828 netdev->tx_queue_len = conn->path->msglim;
829 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
832 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
834 struct iucv_connection *conn = arg;
836 IUCV_DBF_TEXT(trace, 3, __func__);
837 fsm_deltimer(&conn->timer);
838 iucv_path_sever(conn->path, NULL);
839 fsm_newstate(fi, CONN_STATE_STARTWAIT);
842 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
844 struct iucv_connection *conn = arg;
845 struct net_device *netdev = conn->netdev;
846 struct netiucv_priv *privptr = netdev_priv(netdev);
848 IUCV_DBF_TEXT(trace, 3, __func__);
850 fsm_deltimer(&conn->timer);
851 iucv_path_sever(conn->path, NULL);
852 dev_info(privptr->dev, "The peer interface of the IUCV device"
853 " has closed the connection\n");
854 IUCV_DBF_TEXT(data, 2,
855 "conn_action_connsever: Remote dropped connection\n");
856 fsm_newstate(fi, CONN_STATE_STARTWAIT);
857 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
860 static void conn_action_start(fsm_instance *fi, int event, void *arg)
862 struct iucv_connection *conn = arg;
863 struct net_device *netdev = conn->netdev;
864 struct netiucv_priv *privptr = netdev_priv(netdev);
865 int rc;
867 IUCV_DBF_TEXT(trace, 3, __func__);
869 fsm_newstate(fi, CONN_STATE_STARTWAIT);
870 IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
871 netdev->name, conn->userid);
874 * We must set the state before calling iucv_connect because the
875 * callback handler could be called at any point after the connection
876 * request is sent
879 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
880 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
881 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
882 NULL, iucvMagic, conn);
883 switch (rc) {
884 case 0:
885 netdev->tx_queue_len = conn->path->msglim;
886 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
887 CONN_EVENT_TIMER, conn);
888 return;
889 case 11:
890 dev_warn(privptr->dev,
891 "The IUCV device failed to connect to z/VM guest %s\n",
892 netiucv_printname(conn->userid));
893 fsm_newstate(fi, CONN_STATE_STARTWAIT);
894 break;
895 case 12:
896 dev_warn(privptr->dev,
897 "The IUCV device failed to connect to the peer on z/VM"
898 " guest %s\n", netiucv_printname(conn->userid));
899 fsm_newstate(fi, CONN_STATE_STARTWAIT);
900 break;
901 case 13:
902 dev_err(privptr->dev,
903 "Connecting the IUCV device would exceed the maximum"
904 " number of IUCV connections\n");
905 fsm_newstate(fi, CONN_STATE_CONNERR);
906 break;
907 case 14:
908 dev_err(privptr->dev,
909 "z/VM guest %s has too many IUCV connections"
910 " to connect with the IUCV device\n",
911 netiucv_printname(conn->userid));
912 fsm_newstate(fi, CONN_STATE_CONNERR);
913 break;
914 case 15:
915 dev_err(privptr->dev,
916 "The IUCV device cannot connect to a z/VM guest with no"
917 " IUCV authorization\n");
918 fsm_newstate(fi, CONN_STATE_CONNERR);
919 break;
920 default:
921 dev_err(privptr->dev,
922 "Connecting the IUCV device failed with error %d\n",
923 rc);
924 fsm_newstate(fi, CONN_STATE_CONNERR);
925 break;
927 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
928 kfree(conn->path);
929 conn->path = NULL;
932 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
934 struct sk_buff *skb;
936 while ((skb = skb_dequeue(q))) {
937 atomic_dec(&skb->users);
938 dev_kfree_skb_any(skb);
942 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
944 struct iucv_event *ev = arg;
945 struct iucv_connection *conn = ev->conn;
946 struct net_device *netdev = conn->netdev;
947 struct netiucv_priv *privptr = netdev_priv(netdev);
949 IUCV_DBF_TEXT(trace, 3, __func__);
951 fsm_deltimer(&conn->timer);
952 fsm_newstate(fi, CONN_STATE_STOPPED);
953 netiucv_purge_skb_queue(&conn->collect_queue);
954 if (conn->path) {
955 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
956 iucv_path_sever(conn->path, iucvMagic);
957 kfree(conn->path);
958 conn->path = NULL;
960 netiucv_purge_skb_queue(&conn->commit_queue);
961 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
964 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
966 struct iucv_connection *conn = arg;
967 struct net_device *netdev = conn->netdev;
969 IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
970 netdev->name, conn->userid);
973 static const fsm_node conn_fsm[] = {
974 { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
975 { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
977 { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
978 { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
979 { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
980 { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
981 { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
982 { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
983 { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
985 { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
986 { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
987 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
988 { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
989 { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
991 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
992 { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
994 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
995 { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
996 { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
998 { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
999 { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
1001 { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
1002 { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
1005 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
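/*
 * Editor's note (sketch of how the table above is consumed; see fsm.h in this
 * directory and netiucv_new_connection() below): init_fsm() turns the
 * (state, event, action) triples into a dispatch table and fsm_event() then
 * invokes the action registered for the current state.
 */
#if 0	/* sketch only */
	conn->fsm = init_fsm("netiucvconn", conn_state_names, conn_event_names,
			     NR_CONN_STATES, NR_CONN_EVENTS,
			     conn_fsm, CONN_FSM_LEN, GFP_KERNEL);
	fsm_event(conn->fsm, CONN_EVENT_START, conn);
#endif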
1009 * Actions for interface - statemachine.
1013 * dev_action_start
1014 * @fi: An instance of an interface statemachine.
1015 * @event: The event, just happened.
1016 * @arg: Generic pointer, cast from struct net_device * upon call.
1018 * Startup connection by sending CONN_EVENT_START to it.
1020 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1022 struct net_device *dev = arg;
1023 struct netiucv_priv *privptr = netdev_priv(dev);
1025 IUCV_DBF_TEXT(trace, 3, __func__);
1027 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1028 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1032 * Shutdown connection by sending CONN_EVENT_STOP to it.
1034 * @param fi An instance of an interface statemachine.
1035 * @param event The event, just happened.
1036 * @param arg Generic pointer, cast from struct net_device * upon call.
1038 static void
1039 dev_action_stop(fsm_instance *fi, int event, void *arg)
1041 struct net_device *dev = arg;
1042 struct netiucv_priv *privptr = netdev_priv(dev);
1043 struct iucv_event ev;
1045 IUCV_DBF_TEXT(trace, 3, __func__);
1047 ev.conn = privptr->conn;
1049 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1050 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1054 * Called from connection statemachine
1055 * when a connection is up and running.
1057 * @param fi An instance of an interface statemachine.
1058 * @param event The event, just happened.
1059 * @param arg Generic pointer, cast from struct net_device * upon call.
1061 static void
1062 dev_action_connup(fsm_instance *fi, int event, void *arg)
1064 struct net_device *dev = arg;
1065 struct netiucv_priv *privptr = netdev_priv(dev);
1067 IUCV_DBF_TEXT(trace, 3, __func__);
1069 switch (fsm_getstate(fi)) {
1070 case DEV_STATE_STARTWAIT:
1071 fsm_newstate(fi, DEV_STATE_RUNNING);
1072 dev_info(privptr->dev,
1073 "The IUCV device has been connected"
1074 " successfully to %s\n", privptr->conn->userid);
1075 IUCV_DBF_TEXT(setup, 3,
1076 "connection is up and running\n");
1077 break;
1078 case DEV_STATE_STOPWAIT:
1079 IUCV_DBF_TEXT(data, 2,
1080 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1081 break;
1086 * Called from connection statemachine
1087 * when a connection has been shutdown.
1089 * @param fi An instance of an interface statemachine.
1090 * @param event The event, just happened.
1091 * @param arg Generic pointer, cast from struct net_device * upon call.
1093 static void
1094 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1096 IUCV_DBF_TEXT(trace, 3, __func__);
1098 switch (fsm_getstate(fi)) {
1099 case DEV_STATE_RUNNING:
1100 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1101 break;
1102 case DEV_STATE_STOPWAIT:
1103 fsm_newstate(fi, DEV_STATE_STOPPED);
1104 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1105 break;
1109 static const fsm_node dev_fsm[] = {
1110 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1112 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1113 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1115 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1116 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1118 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1119 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
1120 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
1123 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1126 * Transmit a packet.
1127 * This is a helper function for netiucv_tx().
1129 * @param conn Connection to be used for sending.
1130 * @param skb Pointer to struct sk_buff of packet to send.
1131 * The linklevel header has already been set up
1132 * by netiucv_tx().
1134 * @return 0 on success, -ERRNO on failure.
1136 static int netiucv_transmit_skb(struct iucv_connection *conn,
1137 struct sk_buff *skb)
1139 struct iucv_message msg;
1140 unsigned long saveflags;
1141 struct ll_header header;
1142 int rc;
1144 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1145 int l = skb->len + NETIUCV_HDRLEN;
1147 spin_lock_irqsave(&conn->collect_lock, saveflags);
1148 if (conn->collect_len + l >
1149 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1150 rc = -EBUSY;
1151 IUCV_DBF_TEXT(data, 2,
1152 "EBUSY from netiucv_transmit_skb\n");
1153 } else {
1154 atomic_inc(&skb->users);
1155 skb_queue_tail(&conn->collect_queue, skb);
1156 conn->collect_len += l;
1157 rc = 0;
1159 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1160 } else {
1161 struct sk_buff *nskb = skb;
1163 * Copy the skb to a new allocated skb in lowmem only if the
1164 * data is located above 2G in memory or tailroom is < 2.
1166 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1167 NETIUCV_HDRLEN)) >> 31;
1168 int copied = 0;
1169 if (hi || (skb_tailroom(skb) < 2)) {
1170 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1171 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1172 if (!nskb) {
1173 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1174 rc = -ENOMEM;
1175 return rc;
1176 } else {
1177 skb_reserve(nskb, NETIUCV_HDRLEN);
1178 memcpy(skb_put(nskb, skb->len),
1179 skb->data, skb->len);
1181 copied = 1;
1184 * skb now is below 2G and has enough room. Add headers.
1186 header.next = nskb->len + NETIUCV_HDRLEN;
1187 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1188 header.next = 0;
1189 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1191 fsm_newstate(conn->fsm, CONN_STATE_TX);
1192 conn->prof.send_stamp = current_kernel_time();
1194 msg.tag = 1;
1195 msg.class = 0;
1196 rc = iucv_message_send(conn->path, &msg, 0, 0,
1197 nskb->data, nskb->len);
1198 conn->prof.doios_single++;
1199 conn->prof.txlen += skb->len;
1200 conn->prof.tx_pending++;
1201 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1202 conn->prof.tx_max_pending = conn->prof.tx_pending;
1203 if (rc) {
1204 struct netiucv_priv *privptr;
1205 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1206 conn->prof.tx_pending--;
1207 privptr = netdev_priv(conn->netdev);
1208 if (privptr)
1209 privptr->stats.tx_errors++;
1210 if (copied)
1211 dev_kfree_skb(nskb);
1212 else {
1214 * Remove our headers. They get added
1215 * again on retransmit.
1217 skb_pull(skb, NETIUCV_HDRLEN);
1218 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1220 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1221 } else {
1222 if (copied)
1223 dev_kfree_skb(skb);
1224 atomic_inc(&nskb->users);
1225 skb_queue_tail(&conn->commit_queue, nskb);
1229 return rc;
1233 * Interface API for upper network layers
1237 * Open an interface.
1238 * Called from generic network layer when ifconfig up is run.
1240 * @param dev Pointer to interface struct.
1242 * @return 0 on success, -ERRNO on failure. (Never fails.)
1244 static int netiucv_open(struct net_device *dev)
1246 struct netiucv_priv *priv = netdev_priv(dev);
1248 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1249 return 0;
1253 * Close an interface.
1254 * Called from generic network layer when ifconfig down is run.
1256 * @param dev Pointer to interface struct.
1258 * @return 0 on success, -ERRNO on failure. (Never fails.)
1260 static int netiucv_close(struct net_device *dev)
1262 struct netiucv_priv *priv = netdev_priv(dev);
1264 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1265 return 0;
1269 * Start transmission of a packet.
1270 * Called from generic network device layer.
1272 * @param skb Pointer to buffer containing the packet.
1273 * @param dev Pointer to interface struct.
1275 * @return 0 if packet consumed, !0 if packet rejected.
1276 * Note: If we return !0, then the packet is free'd by
1277 * the generic network layer.
1279 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1281 struct netiucv_priv *privptr = netdev_priv(dev);
1282 int rc;
1284 IUCV_DBF_TEXT(trace, 4, __func__);
1286 * Some sanity checks ...
1288 if (skb == NULL) {
1289 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1290 privptr->stats.tx_dropped++;
1291 return 0;
1293 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1294 IUCV_DBF_TEXT(data, 2,
1295 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1296 dev_kfree_skb(skb);
1297 privptr->stats.tx_dropped++;
1298 return 0;
1302 * If connection is not running, try to restart it
1303 * and throw away packet.
1305 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1306 dev_kfree_skb(skb);
1307 privptr->stats.tx_dropped++;
1308 privptr->stats.tx_errors++;
1309 privptr->stats.tx_carrier_errors++;
1310 return 0;
1313 if (netiucv_test_and_set_busy(dev)) {
1314 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1315 return NETDEV_TX_BUSY;
1317 dev->trans_start = jiffies;
1318 rc = netiucv_transmit_skb(privptr->conn, skb);
1319 netiucv_clear_busy(dev);
1320 return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1324 * netiucv_stats
1325 * @dev: Pointer to interface struct.
1327 * Returns interface statistics of a device.
1329 * Returns pointer to stats struct of this interface.
1331 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1333 struct netiucv_priv *priv = netdev_priv(dev);
1335 IUCV_DBF_TEXT(trace, 5, __func__);
1336 return &priv->stats;
1340 * netiucv_change_mtu
1341 * @dev: Pointer to interface struct.
1342 * @new_mtu: The new MTU to use for this interface.
1344 * Sets MTU of an interface.
1346 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1347 * (valid range is 576 .. NETIUCV_MTU_MAX).
1349 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1351 IUCV_DBF_TEXT(trace, 3, __func__);
1352 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1353 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1354 return -EINVAL;
1356 dev->mtu = new_mtu;
1357 return 0;
1361 * attributes in sysfs
1364 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1365 char *buf)
1367 struct netiucv_priv *priv = dev_get_drvdata(dev);
1369 IUCV_DBF_TEXT(trace, 5, __func__);
1370 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1373 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1374 const char *buf, size_t count)
1376 struct netiucv_priv *priv = dev_get_drvdata(dev);
1377 struct net_device *ndev = priv->conn->netdev;
1378 char *p;
1379 char *tmp;
1380 char username[9];
1381 int i;
1382 struct iucv_connection *cp;
1384 IUCV_DBF_TEXT(trace, 3, __func__);
1385 if (count > 9) {
1386 IUCV_DBF_TEXT_(setup, 2,
1387 "%d is length of username\n", (int) count);
1388 return -EINVAL;
1391 tmp = strsep((char **) &buf, "\n");
1392 for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1393 if (isalnum(*p) || (*p == '$')) {
1394 username[i]= toupper(*p);
1395 continue;
1397 if (*p == '\n') {
1398 /* trailing lf, grr */
1399 break;
1401 IUCV_DBF_TEXT_(setup, 2,
1402 "username: invalid character %c\n", *p);
1403 return -EINVAL;
1405 while (i < 8)
1406 username[i++] = ' ';
1407 username[8] = '\0';
1409 if (memcmp(username, priv->conn->userid, 9) &&
1410 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1411 /* username changed while the interface is active. */
1412 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1413 return -EPERM;
1415 read_lock_bh(&iucv_connection_rwlock);
1416 list_for_each_entry(cp, &iucv_connection_list, list) {
1417 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1418 read_unlock_bh(&iucv_connection_rwlock);
1419 IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
1420 "to %s already exists\n", username);
1421 return -EEXIST;
1424 read_unlock_bh(&iucv_connection_rwlock);
1425 memcpy(priv->conn->userid, username, 9);
1426 return count;
1429 static DEVICE_ATTR(user, 0644, user_show, user_write);
1431 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1432 char *buf)
1434 struct netiucv_priv *priv = dev_get_drvdata(dev);
1436 IUCV_DBF_TEXT(trace, 5, __func__);
1437 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1440 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1441 const char *buf, size_t count)
1443 struct netiucv_priv *priv = dev_get_drvdata(dev);
1444 struct net_device *ndev = priv->conn->netdev;
1445 char *e;
1446 int bs1;
1448 IUCV_DBF_TEXT(trace, 3, __func__);
1449 if (count >= 39)
1450 return -EINVAL;
1452 bs1 = simple_strtoul(buf, &e, 0);
1454 if (e && (!isspace(*e))) {
1455 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1456 return -EINVAL;
1458 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1459 IUCV_DBF_TEXT_(setup, 2,
1460 "buffer_write: buffer size %d too large\n",
1461 bs1);
1462 return -EINVAL;
1464 if ((ndev->flags & IFF_RUNNING) &&
1465 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1466 IUCV_DBF_TEXT_(setup, 2,
1467 "buffer_write: buffer size %d too small\n",
1468 bs1);
1469 return -EINVAL;
1471 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1472 IUCV_DBF_TEXT_(setup, 2,
1473 "buffer_write: buffer size %d too small\n",
1474 bs1);
1475 return -EINVAL;
1478 priv->conn->max_buffsize = bs1;
1479 if (!(ndev->flags & IFF_RUNNING))
1480 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1482 return count;
1486 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
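/*
 * Editor's note (worked example from the checks in buffer_write() above):
 * with NETIUCV_HDRLEN == 2 the smallest accepted buffer size is
 * 576 + 2 + 2 = 580 bytes and the largest is NETIUCV_BUFSIZE_MAX (32768).
 * Writing e.g. 16384 while the interface is down also resets the MTU to
 * 16384 - 2 - 2 = 16380.
 */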
1488 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1489 char *buf)
1491 struct netiucv_priv *priv = dev_get_drvdata(dev);
1493 IUCV_DBF_TEXT(trace, 5, __func__);
1494 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1497 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1499 static ssize_t conn_fsm_show (struct device *dev,
1500 struct device_attribute *attr, char *buf)
1502 struct netiucv_priv *priv = dev_get_drvdata(dev);
1504 IUCV_DBF_TEXT(trace, 5, __func__);
1505 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1508 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1510 static ssize_t maxmulti_show (struct device *dev,
1511 struct device_attribute *attr, char *buf)
1513 struct netiucv_priv *priv = dev_get_drvdata(dev);
1515 IUCV_DBF_TEXT(trace, 5, __func__);
1516 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1519 static ssize_t maxmulti_write (struct device *dev,
1520 struct device_attribute *attr,
1521 const char *buf, size_t count)
1523 struct netiucv_priv *priv = dev_get_drvdata(dev);
1525 IUCV_DBF_TEXT(trace, 4, __func__);
1526 priv->conn->prof.maxmulti = 0;
1527 return count;
1530 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1532 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1533 char *buf)
1535 struct netiucv_priv *priv = dev_get_drvdata(dev);
1537 IUCV_DBF_TEXT(trace, 5, __func__);
1538 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1541 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1542 const char *buf, size_t count)
1544 struct netiucv_priv *priv = dev_get_drvdata(dev);
1546 IUCV_DBF_TEXT(trace, 4, __func__);
1547 priv->conn->prof.maxcqueue = 0;
1548 return count;
1551 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1553 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1554 char *buf)
1556 struct netiucv_priv *priv = dev_get_drvdata(dev);
1558 IUCV_DBF_TEXT(trace, 5, __func__);
1559 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1562 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1563 const char *buf, size_t count)
1565 struct netiucv_priv *priv = dev_get_drvdata(dev);
1567 IUCV_DBF_TEXT(trace, 4, __func__);
1568 priv->conn->prof.doios_single = 0;
1569 return count;
1572 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1574 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1575 char *buf)
1577 struct netiucv_priv *priv = dev_get_drvdata(dev);
1579 IUCV_DBF_TEXT(trace, 5, __func__);
1580 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1583 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1584 const char *buf, size_t count)
1586 struct netiucv_priv *priv = dev_get_drvdata(dev);
1588 IUCV_DBF_TEXT(trace, 5, __func__);
1589 priv->conn->prof.doios_multi = 0;
1590 return count;
1593 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1595 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1596 char *buf)
1598 struct netiucv_priv *priv = dev_get_drvdata(dev);
1600 IUCV_DBF_TEXT(trace, 5, __func__);
1601 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1604 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1605 const char *buf, size_t count)
1607 struct netiucv_priv *priv = dev_get_drvdata(dev);
1609 IUCV_DBF_TEXT(trace, 4, __func__);
1610 priv->conn->prof.txlen = 0;
1611 return count;
1614 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1616 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1617 char *buf)
1619 struct netiucv_priv *priv = dev_get_drvdata(dev);
1621 IUCV_DBF_TEXT(trace, 5, __func__);
1622 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1625 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1626 const char *buf, size_t count)
1628 struct netiucv_priv *priv = dev_get_drvdata(dev);
1630 IUCV_DBF_TEXT(trace, 4, __func__);
1631 priv->conn->prof.tx_time = 0;
1632 return count;
1635 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1637 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1638 char *buf)
1640 struct netiucv_priv *priv = dev_get_drvdata(dev);
1642 IUCV_DBF_TEXT(trace, 5, __func__);
1643 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1646 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1647 const char *buf, size_t count)
1649 struct netiucv_priv *priv = dev_get_drvdata(dev);
1651 IUCV_DBF_TEXT(trace, 4, __func__);
1652 priv->conn->prof.tx_pending = 0;
1653 return count;
1656 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1658 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1659 char *buf)
1661 struct netiucv_priv *priv = dev_get_drvdata(dev);
1663 IUCV_DBF_TEXT(trace, 5, __func__);
1664 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1667 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1668 const char *buf, size_t count)
1670 struct netiucv_priv *priv = dev_get_drvdata(dev);
1672 IUCV_DBF_TEXT(trace, 4, __func__);
1673 priv->conn->prof.tx_max_pending = 0;
1674 return count;
1677 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1679 static struct attribute *netiucv_attrs[] = {
1680 &dev_attr_buffer.attr,
1681 &dev_attr_user.attr,
1682 NULL,
1685 static struct attribute_group netiucv_attr_group = {
1686 .attrs = netiucv_attrs,
1689 static struct attribute *netiucv_stat_attrs[] = {
1690 &dev_attr_device_fsm_state.attr,
1691 &dev_attr_connection_fsm_state.attr,
1692 &dev_attr_max_tx_buffer_used.attr,
1693 &dev_attr_max_chained_skbs.attr,
1694 &dev_attr_tx_single_write_ops.attr,
1695 &dev_attr_tx_multi_write_ops.attr,
1696 &dev_attr_netto_bytes.attr,
1697 &dev_attr_max_tx_io_time.attr,
1698 &dev_attr_tx_pending.attr,
1699 &dev_attr_tx_max_pending.attr,
1700 NULL,
1703 static struct attribute_group netiucv_stat_attr_group = {
1704 .name = "stats",
1705 .attrs = netiucv_stat_attrs,
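/*
 * Editor's note (illustrative; exact sysfs paths depend on the running
 * system): after netiucv_register_device() below has registered the device
 * on the iucv bus, the two groups above appear roughly as
 *
 *   .../netiucv0/user              peer z/VM user ID (read/write)
 *   .../netiucv0/buffer            maximum buffer size (read/write)
 *   .../netiucv0/stats/*           per-connection transmit counters
 */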
1708 static int netiucv_add_files(struct device *dev)
1710 int ret;
1712 IUCV_DBF_TEXT(trace, 3, __func__);
1713 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1714 if (ret)
1715 return ret;
1716 ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1717 if (ret)
1718 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1719 return ret;
1722 static void netiucv_remove_files(struct device *dev)
1724 IUCV_DBF_TEXT(trace, 3, __func__);
1725 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1726 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1729 static int netiucv_register_device(struct net_device *ndev)
1731 struct netiucv_priv *priv = netdev_priv(ndev);
1732 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1733 int ret;
1736 IUCV_DBF_TEXT(trace, 3, __func__);
1738 if (dev) {
1739 dev_set_name(dev, "net%s", ndev->name);
1740 dev->bus = &iucv_bus;
1741 dev->parent = iucv_root;
1743 * The release function could be called after the
1744 * module has been unloaded. Its _only_ task is to
1745 * free the struct. Therefore, we specify kfree()
1746 * directly here. (Probably a little bit obfuscating
1747 * but legitimate ...).
1749 dev->release = (void (*)(struct device *))kfree;
1750 dev->driver = &netiucv_driver;
1751 } else
1752 return -ENOMEM;
1754 ret = device_register(dev);
1756 if (ret)
1757 return ret;
1758 ret = netiucv_add_files(dev);
1759 if (ret)
1760 goto out_unreg;
1761 priv->dev = dev;
1762 dev_set_drvdata(dev, priv);
1763 return 0;
1765 out_unreg:
1766 device_unregister(dev);
1767 return ret;
1770 static void netiucv_unregister_device(struct device *dev)
1772 IUCV_DBF_TEXT(trace, 3, __func__);
1773 netiucv_remove_files(dev);
1774 device_unregister(dev);
1778 * Allocate and initialize a new connection structure.
1779 * Add it to the list of netiucv connections;
1781 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1782 char *username)
1784 struct iucv_connection *conn;
1786 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1787 if (!conn)
1788 goto out;
1789 skb_queue_head_init(&conn->collect_queue);
1790 skb_queue_head_init(&conn->commit_queue);
1791 spin_lock_init(&conn->collect_lock);
1792 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1793 conn->netdev = dev;
1795 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1796 if (!conn->rx_buff)
1797 goto out_conn;
1798 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1799 if (!conn->tx_buff)
1800 goto out_rx;
1801 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1802 conn_event_names, NR_CONN_STATES,
1803 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1804 GFP_KERNEL);
1805 if (!conn->fsm)
1806 goto out_tx;
1808 fsm_settimer(conn->fsm, &conn->timer);
1809 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1811 if (username) {
1812 memcpy(conn->userid, username, 9);
1813 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1816 write_lock_bh(&iucv_connection_rwlock);
1817 list_add_tail(&conn->list, &iucv_connection_list);
1818 write_unlock_bh(&iucv_connection_rwlock);
1819 return conn;
1821 out_tx:
1822 kfree_skb(conn->tx_buff);
1823 out_rx:
1824 kfree_skb(conn->rx_buff);
1825 out_conn:
1826 kfree(conn);
1827 out:
1828 return NULL;
1832 * Release a connection structure and remove it from the
1833 * list of netiucv connections.
1835 static void netiucv_remove_connection(struct iucv_connection *conn)
1837 IUCV_DBF_TEXT(trace, 3, __func__);
1838 write_lock_bh(&iucv_connection_rwlock);
1839 list_del_init(&conn->list);
1840 write_unlock_bh(&iucv_connection_rwlock);
1841 fsm_deltimer(&conn->timer);
1842 netiucv_purge_skb_queue(&conn->collect_queue);
1843 if (conn->path) {
1844 iucv_path_sever(conn->path, iucvMagic);
1845 kfree(conn->path);
1846 conn->path = NULL;
1848 netiucv_purge_skb_queue(&conn->commit_queue);
1849 kfree_fsm(conn->fsm);
1850 kfree_skb(conn->rx_buff);
1851 kfree_skb(conn->tx_buff);
1855 * Release everything of a net device.
1857 static void netiucv_free_netdevice(struct net_device *dev)
1859 struct netiucv_priv *privptr = netdev_priv(dev);
1861 IUCV_DBF_TEXT(trace, 3, __func__);
1863 if (!dev)
1864 return;
1866 if (privptr) {
1867 if (privptr->conn)
1868 netiucv_remove_connection(privptr->conn);
1869 if (privptr->fsm)
1870 kfree_fsm(privptr->fsm);
1871 privptr->conn = NULL; privptr->fsm = NULL;
1872 /* privptr gets freed by free_netdev() */
1874 free_netdev(dev);
1878 * Initialize a net device. (Called from kernel in alloc_netdev())
1880 static const struct net_device_ops netiucv_netdev_ops = {
1881 .ndo_open = netiucv_open,
1882 .ndo_stop = netiucv_close,
1883 .ndo_get_stats = netiucv_stats,
1884 .ndo_start_xmit = netiucv_tx,
1885 .ndo_change_mtu = netiucv_change_mtu,
1888 static void netiucv_setup_netdevice(struct net_device *dev)
1890 dev->mtu = NETIUCV_MTU_DEFAULT;
1891 dev->destructor = netiucv_free_netdevice;
1892 dev->hard_header_len = NETIUCV_HDRLEN;
1893 dev->addr_len = 0;
1894 dev->type = ARPHRD_SLIP;
1895 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1896 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1897 dev->netdev_ops = &netiucv_netdev_ops;
1901 * Allocate and initialize everything of a net device.
1903 static struct net_device *netiucv_init_netdevice(char *username)
1905 struct netiucv_priv *privptr;
1906 struct net_device *dev;
1908 dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1909 netiucv_setup_netdevice);
1910 if (!dev)
1911 return NULL;
1912 if (dev_alloc_name(dev, dev->name) < 0)
1913 goto out_netdev;
1915 privptr = netdev_priv(dev);
1916 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1917 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1918 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1919 if (!privptr->fsm)
1920 goto out_netdev;
1922 privptr->conn = netiucv_new_connection(dev, username);
1923 if (!privptr->conn) {
1924 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1925 goto out_fsm;
1927 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1928 return dev;
1930 out_fsm:
1931 kfree_fsm(privptr->fsm);
1932 out_netdev:
1933 free_netdev(dev);
1934 return NULL;
1937 static ssize_t conn_write(struct device_driver *drv,
1938 const char *buf, size_t count)
1940 const char *p;
1941 char username[9];
1942 int i, rc;
1943 struct net_device *dev;
1944 struct netiucv_priv *priv;
1945 struct iucv_connection *cp;
1947 IUCV_DBF_TEXT(trace, 3, __func__);
1948 if (count>9) {
1949 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1950 return -EINVAL;
1953 for (i = 0, p = buf; i < 8 && *p; i++, p++) {
1954 if (isalnum(*p) || *p == '$') {
1955 username[i] = toupper(*p);
1956 continue;
1958 if (*p == '\n')
1959 /* trailing lf, grr */
1960 break;
1961 IUCV_DBF_TEXT_(setup, 2,
1962 "conn_write: invalid character %c\n", *p);
1963 return -EINVAL;
1965 while (i < 8)
1966 username[i++] = ' ';
1967 username[8] = '\0';
1969 read_lock_bh(&iucv_connection_rwlock);
1970 list_for_each_entry(cp, &iucv_connection_list, list) {
1971 if (!strncmp(username, cp->userid, 9)) {
1972 read_unlock_bh(&iucv_connection_rwlock);
1973 IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
1974 "to %s already exists\n", username);
1975 return -EEXIST;
1978 read_unlock_bh(&iucv_connection_rwlock);
1980 dev = netiucv_init_netdevice(username);
1981 if (!dev) {
1982 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
1983 return -ENODEV;
1986 rc = netiucv_register_device(dev);
1987 if (rc) {
1988 IUCV_DBF_TEXT_(setup, 2,
1989 "ret %d from netiucv_register_device\n", rc);
1990 goto out_free_ndev;
1993 /* sysfs magic */
1994 priv = netdev_priv(dev);
1995 SET_NETDEV_DEV(dev, priv->dev);
1997 rc = register_netdev(dev);
1998 if (rc)
1999 goto out_unreg;
2001 dev_info(priv->dev, "The IUCV interface to %s has been"
2002 " established successfully\n", netiucv_printname(username));
2004 return count;
2006 out_unreg:
2007 netiucv_unregister_device(priv->dev);
2008 out_free_ndev:
2009 netiucv_free_netdevice(dev);
2010 return rc;
2013 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2015 static ssize_t remove_write (struct device_driver *drv,
2016 const char *buf, size_t count)
2018 struct iucv_connection *cp;
2019 struct net_device *ndev;
2020 struct netiucv_priv *priv;
2021 struct device *dev;
2022 char name[IFNAMSIZ];
2023 const char *p;
2024 int i;
2026 IUCV_DBF_TEXT(trace, 3, __func__);
2028 if (count >= IFNAMSIZ)
2029 count = IFNAMSIZ - 1;
2031 for (i = 0, p = buf; i < count && *p; i++, p++) {
2032 if (*p == '\n' || *p == ' ')
2033 /* trailing lf, grr */
2034 break;
2035 name[i] = *p;
2037 name[i] = '\0';
2039 read_lock_bh(&iucv_connection_rwlock);
2040 list_for_each_entry(cp, &iucv_connection_list, list) {
2041 ndev = cp->netdev;
2042 priv = netdev_priv(ndev);
2043 dev = priv->dev;
2044 if (strncmp(name, ndev->name, count))
2045 continue;
2046 read_unlock_bh(&iucv_connection_rwlock);
2047 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2048 dev_warn(dev, "The IUCV device is connected"
2049 " to %s and cannot be removed\n",
2050 priv->conn->userid);
2051 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2052 return -EPERM;
2054 unregister_netdev(ndev);
2055 netiucv_unregister_device(dev);
2056 return count;
2058 read_unlock_bh(&iucv_connection_rwlock);
2059 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2060 return -EINVAL;
2063 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
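/*
 * Editor's note (usage sketch; the paths shown are the conventional sysfs
 * locations for a driver on the iucv bus and may differ on a given system):
 * interfaces are created and removed through the two driver attributes
 * defined above, e.g.
 *
 *   echo USERID > /sys/bus/iucv/drivers/netiucv/connection
 *   echo iucv0  > /sys/bus/iucv/drivers/netiucv/remove
 *
 * conn_write() allocates the net_device and its sysfs entries;
 * remove_write() tears them down again, provided the interface is not up.
 */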
2065 static struct attribute * netiucv_drv_attrs[] = {
2066 &driver_attr_connection.attr,
2067 &driver_attr_remove.attr,
2068 NULL,
2071 static struct attribute_group netiucv_drv_attr_group = {
2072 .attrs = netiucv_drv_attrs,
2075 static struct attribute_group *netiucv_drv_attr_groups[] = {
2076 &netiucv_drv_attr_group,
2077 NULL,
2080 static void netiucv_banner(void)
2082 pr_info("driver initialized\n");
2085 static void __exit netiucv_exit(void)
2087 struct iucv_connection *cp;
2088 struct net_device *ndev;
2089 struct netiucv_priv *priv;
2090 struct device *dev;
2092 IUCV_DBF_TEXT(trace, 3, __func__);
2093 while (!list_empty(&iucv_connection_list)) {
2094 cp = list_entry(iucv_connection_list.next,
2095 struct iucv_connection, list);
2096 ndev = cp->netdev;
2097 priv = netdev_priv(ndev);
2098 dev = priv->dev;
2100 unregister_netdev(ndev);
2101 netiucv_unregister_device(dev);
2104 driver_unregister(&netiucv_driver);
2105 iucv_unregister(&netiucv_handler, 1);
2106 iucv_unregister_dbf_views();
2108 pr_info("driver unloaded\n");
2109 return;
2112 static int __init netiucv_init(void)
2114 int rc;
2116 rc = iucv_register_dbf_views();
2117 if (rc)
2118 goto out;
2119 rc = iucv_register(&netiucv_handler, 1);
2120 if (rc)
2121 goto out_dbf;
2122 IUCV_DBF_TEXT(trace, 3, __func__);
2123 netiucv_driver.groups = netiucv_drv_attr_groups;
2124 rc = driver_register(&netiucv_driver);
2125 if (rc) {
2126 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2127 goto out_iucv;
2130 netiucv_banner();
2131 return rc;
2133 out_iucv:
2134 iucv_unregister(&netiucv_handler, 1);
2135 out_dbf:
2136 iucv_unregister_dbf_views();
2137 out:
2138 return rc;
2141 module_init(netiucv_init);
2142 module_exit(netiucv_exit);
2143 MODULE_LICENSE("GPL");