/*
 * IUCV network driver
 *
 * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 *
 * Sysfs integration and all bugs therein by Cornelia Huck
 * (cornelia.huck@de.ibm.com)
 *
 * Documentation used:
 *  the source of the original IUCV driver by:
 *    Stefan Hegewald <hegewald@de.ibm.com>
 *    Hartmut Penner <hpenner@de.ibm.com>
 *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *    Martin Schwidefsky (schwidefsky@de.ibm.com)
 *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>
#include <linux/device.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <net/iucv/iucv.h>
#include "fsm.h"

MODULE_AUTHOR
    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
/**
 * Debug Facility stuff
 */
#define IUCV_DBF_SETUP_NAME "iucv_setup"
#define IUCV_DBF_SETUP_LEN 32
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3

#define IUCV_DBF_DATA_NAME "iucv_data"
#define IUCV_DBF_DATA_LEN 128
#define IUCV_DBF_DATA_PAGES 2
#define IUCV_DBF_DATA_NR_AREAS 1
#define IUCV_DBF_DATA_LEVEL 2

#define IUCV_DBF_TRACE_NAME "iucv_trace"
#define IUCV_DBF_TRACE_LEN 16
#define IUCV_DBF_TRACE_PAGES 4
#define IUCV_DBF_TRACE_NR_AREAS 1
#define IUCV_DBF_TRACE_LEVEL 3

#define IUCV_DBF_TEXT(name,level,text) \
        do { \
                debug_text_event(iucv_dbf_##name,level,text); \
        } while (0)

#define IUCV_DBF_HEX(name,level,addr,len) \
        do { \
                debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
        } while (0)

DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);

/* Allow to sort out low debug levels early to avoid wasted sprints */
static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
{
        return (level <= dbf_grp->level);
}

#define IUCV_DBF_TEXT_(name, level, text...) \
        do { \
                if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
                        char* iucv_dbf_txt_buf = \
                                get_cpu_var(iucv_dbf_txt_buf); \
                        sprintf(iucv_dbf_txt_buf, text); \
                        debug_text_event(iucv_dbf_##name, level, \
                                         iucv_dbf_txt_buf); \
                        put_cpu_var(iucv_dbf_txt_buf); \
                } \
        } while (0)

#define IUCV_DBF_SPRINTF(name,level,text...) \
        do { \
                debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
                debug_sprintf_event(iucv_dbf_trace, level, text ); \
        } while (0)
/**
 * some more debug stuff
 */
#define IUCV_HEXDUMP16(importance,header,ptr) \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
                   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
                   *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
                   *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
                   *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
                   *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
                   *(((char*)ptr)+12),*(((char*)ptr)+13), \
                   *(((char*)ptr)+14),*(((char*)ptr)+15)); \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
                   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
                   *(((char*)ptr)+16),*(((char*)ptr)+17), \
                   *(((char*)ptr)+18),*(((char*)ptr)+19), \
                   *(((char*)ptr)+20),*(((char*)ptr)+21), \
                   *(((char*)ptr)+22),*(((char*)ptr)+23), \
                   *(((char*)ptr)+24),*(((char*)ptr)+25), \
                   *(((char*)ptr)+26),*(((char*)ptr)+27), \
                   *(((char*)ptr)+28),*(((char*)ptr)+29), \
                   *(((char*)ptr)+30),*(((char*)ptr)+31));

#define PRINTK_HEADER " iucv: "       /* for debugging */
static struct device_driver netiucv_driver = {
        .owner = THIS_MODULE,
        .name = "netiucv",
        .bus  = &iucv_bus,
};

static int netiucv_callback_connreq(struct iucv_path *,
                                    u8 ipvmid[8], u8 ipuser[16]);
static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);

static struct iucv_handler netiucv_handler = {
        .path_pending     = netiucv_callback_connreq,
        .path_complete    = netiucv_callback_connack,
        .path_severed     = netiucv_callback_connrej,
        .path_quiesced    = netiucv_callback_connsusp,
        .path_resumed     = netiucv_callback_connres,
        .message_pending  = netiucv_callback_rx,
        .message_complete = netiucv_callback_txdone
};
/**
 * Per connection profiling data
 */
struct connection_profile {
        unsigned long maxmulti;
        unsigned long maxcqueue;
        unsigned long doios_single;
        unsigned long doios_multi;
        unsigned long txlen;
        unsigned long tx_time;
        struct timespec send_stamp;
        unsigned long tx_pending;
        unsigned long tx_max_pending;
};

/**
 * Representation of one iucv connection
 */
struct iucv_connection {
        struct list_head          list;
        struct iucv_path          *path;
        struct sk_buff            *rx_buff;
        struct sk_buff            *tx_buff;
        struct sk_buff_head       collect_queue;
        struct sk_buff_head       commit_queue;
        spinlock_t                collect_lock;
        int                       collect_len;
        int                       max_buffsize;
        fsm_timer                 timer;
        fsm_instance              *fsm;
        struct net_device         *netdev;
        struct connection_profile prof;
        char                      userid[9];
};

/**
 * Linked list of all connection structs.
 */
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);

/**
 * Representation of event-data for the
 * connection state machine.
 */
struct iucv_event {
        struct iucv_connection *conn;
        void                   *data;
};

/**
 * Private part of the network device structure
 */
struct netiucv_priv {
        struct net_device_stats stats;
        unsigned long           tbusy;
        fsm_instance            *fsm;
        struct iucv_connection  *conn;
        struct device           *dev;
};

/**
 * Link level header for a packet.
 */
struct ll_header {
        u16 next;
};

#define NETIUCV_HDRLEN           (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX      32768
#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT      9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC     5000
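/*
 * Editorial note on the link-level framing used by this driver (derived
 * from netiucv_transmit_skb(), conn_action_txdone() and
 * netiucv_unpack_skb() below): an IUCV message carries one or more
 * packets, each preceded by an ll_header whose "next" field holds the
 * offset of the following header, counted from the start of the message
 * buffer.  A trailing header with next == 0 terminates the message:
 *
 *   | hdr(next=o1) | packet 1 | hdr(next=o2) | packet 2 | hdr(next=0) |
 *   0              2          o1             o1+2       o2
 *
 * (2 == NETIUCV_HDRLEN; the offsets o1 and o2 are illustrative only.)
 */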
/**
 * Compatibility macros for busy handling
 * of network devices.
 */
static inline void netiucv_clear_busy(struct net_device *dev)
{
        struct netiucv_priv *priv = netdev_priv(dev);
        clear_bit(0, &priv->tbusy);
        netif_wake_queue(dev);
}

static inline int netiucv_test_and_set_busy(struct net_device *dev)
{
        struct netiucv_priv *priv = netdev_priv(dev);
        netif_stop_queue(dev);
        return test_and_set_bit(0, &priv->tbusy);
}

static u8 iucvMagic[16] = {
        0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
        0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};

/**
 * Convert an iucv userId to its printable
 * form (strip whitespace at end).
 *
 * @param An iucv userId
 *
 * @returns The printable string (static data!!)
 */
static char *netiucv_printname(char *name)
{
        static char tmp[9];
        char *p = tmp;

        memcpy(tmp, name, 8);
        tmp[8] = '\0';
        while (*p && (!isspace(*p)))
                p++;
        *p = '\0';
        return tmp;
}
294 * States of the interface statemachine.
296 enum dev_states {
297 DEV_STATE_STOPPED,
298 DEV_STATE_STARTWAIT,
299 DEV_STATE_STOPWAIT,
300 DEV_STATE_RUNNING,
302 * MUST be always the last element!!
304 NR_DEV_STATES
307 static const char *dev_state_names[] = {
308 "Stopped",
309 "StartWait",
310 "StopWait",
311 "Running",
315 * Events of the interface statemachine.
317 enum dev_events {
318 DEV_EVENT_START,
319 DEV_EVENT_STOP,
320 DEV_EVENT_CONUP,
321 DEV_EVENT_CONDOWN,
323 * MUST be always the last element!!
325 NR_DEV_EVENTS
328 static const char *dev_event_names[] = {
329 "Start",
330 "Stop",
331 "Connection up",
332 "Connection down",
336 * Events of the connection statemachine
338 enum conn_events {
340 * Events, representing callbacks from
341 * lowlevel iucv layer)
343 CONN_EVENT_CONN_REQ,
344 CONN_EVENT_CONN_ACK,
345 CONN_EVENT_CONN_REJ,
346 CONN_EVENT_CONN_SUS,
347 CONN_EVENT_CONN_RES,
348 CONN_EVENT_RX,
349 CONN_EVENT_TXDONE,
352 * Events, representing errors return codes from
353 * calls to lowlevel iucv layer
357 * Event, representing timer expiry.
359 CONN_EVENT_TIMER,
362 * Events, representing commands from upper levels.
364 CONN_EVENT_START,
365 CONN_EVENT_STOP,
368 * MUST be always the last element!!
370 NR_CONN_EVENTS,
373 static const char *conn_event_names[] = {
374 "Remote connection request",
375 "Remote connection acknowledge",
376 "Remote connection reject",
377 "Connection suspended",
378 "Connection resumed",
379 "Data received",
380 "Data sent",
382 "Timer",
384 "Start",
385 "Stop",
389 * States of the connection statemachine.
391 enum conn_states {
393 * Connection not assigned to any device,
394 * initial state, invalid
396 CONN_STATE_INVALID,
399 * Userid assigned but not operating
401 CONN_STATE_STOPPED,
404 * Connection registered,
405 * no connection request sent yet,
406 * no connection request received
408 CONN_STATE_STARTWAIT,
411 * Connection registered and connection request sent,
412 * no acknowledge and no connection request received yet.
414 CONN_STATE_SETUPWAIT,
417 * Connection up and running idle
419 CONN_STATE_IDLE,
422 * Data sent, awaiting CONN_EVENT_TXDONE
424 CONN_STATE_TX,
427 * Error during registration.
429 CONN_STATE_REGERR,
432 * Error during registration.
434 CONN_STATE_CONNERR,
437 * MUST be always the last element!!
439 NR_CONN_STATES,
442 static const char *conn_state_names[] = {
443 "Invalid",
444 "Stopped",
445 "StartWait",
446 "SetupWait",
447 "Idle",
448 "TX",
449 "Terminating",
450 "Registration error",
451 "Connect error",
456 * Debug Facility Stuff
458 static debug_info_t *iucv_dbf_setup = NULL;
459 static debug_info_t *iucv_dbf_data = NULL;
460 static debug_info_t *iucv_dbf_trace = NULL;
462 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
464 static void iucv_unregister_dbf_views(void)
466 if (iucv_dbf_setup)
467 debug_unregister(iucv_dbf_setup);
468 if (iucv_dbf_data)
469 debug_unregister(iucv_dbf_data);
470 if (iucv_dbf_trace)
471 debug_unregister(iucv_dbf_trace);
473 static int iucv_register_dbf_views(void)
475 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
476 IUCV_DBF_SETUP_PAGES,
477 IUCV_DBF_SETUP_NR_AREAS,
478 IUCV_DBF_SETUP_LEN);
479 iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
480 IUCV_DBF_DATA_PAGES,
481 IUCV_DBF_DATA_NR_AREAS,
482 IUCV_DBF_DATA_LEN);
483 iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
484 IUCV_DBF_TRACE_PAGES,
485 IUCV_DBF_TRACE_NR_AREAS,
486 IUCV_DBF_TRACE_LEN);
488 if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
489 (iucv_dbf_trace == NULL)) {
490 iucv_unregister_dbf_views();
491 return -ENOMEM;
493 debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
494 debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
496 debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
497 debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
499 debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
500 debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
502 return 0;
506 * Callback-wrappers, called from lowlevel iucv layer.
509 static void netiucv_callback_rx(struct iucv_path *path,
510 struct iucv_message *msg)
512 struct iucv_connection *conn = path->private;
513 struct iucv_event ev;
515 ev.conn = conn;
516 ev.data = msg;
517 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
520 static void netiucv_callback_txdone(struct iucv_path *path,
521 struct iucv_message *msg)
523 struct iucv_connection *conn = path->private;
524 struct iucv_event ev;
526 ev.conn = conn;
527 ev.data = msg;
528 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
531 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
533 struct iucv_connection *conn = path->private;
535 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
538 static int netiucv_callback_connreq(struct iucv_path *path,
539 u8 ipvmid[8], u8 ipuser[16])
541 struct iucv_connection *conn = path->private;
542 struct iucv_event ev;
543 int rc;
545 if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
546 /* ipuser must match iucvMagic. */
547 return -EINVAL;
548 rc = -EINVAL;
549 read_lock_bh(&iucv_connection_rwlock);
550 list_for_each_entry(conn, &iucv_connection_list, list) {
551 if (strncmp(ipvmid, conn->userid, 8))
552 continue;
553 /* Found a matching connection for this path. */
554 conn->path = path;
555 ev.conn = conn;
556 ev.data = path;
557 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
558 rc = 0;
560 read_unlock_bh(&iucv_connection_rwlock);
561 return rc;
564 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
566 struct iucv_connection *conn = path->private;
568 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
571 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
573 struct iucv_connection *conn = path->private;
575 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
578 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
580 struct iucv_connection *conn = path->private;
582 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
586 * NOP action for statemachines
588 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
593 * Actions of the connection statemachine
597 * netiucv_unpack_skb
598 * @conn: The connection where this skb has been received.
599 * @pskb: The received skb.
601 * Unpack a just received skb and hand it over to upper layers.
602 * Helper function for conn_action_rx.
604 static void netiucv_unpack_skb(struct iucv_connection *conn,
605 struct sk_buff *pskb)
607 struct net_device *dev = conn->netdev;
608 struct netiucv_priv *privptr = netdev_priv(dev);
609 u16 offset = 0;
611 skb_put(pskb, NETIUCV_HDRLEN);
612 pskb->dev = dev;
613 pskb->ip_summed = CHECKSUM_NONE;
614 pskb->protocol = ntohs(ETH_P_IP);
616 while (1) {
617 struct sk_buff *skb;
618 struct ll_header *header = (struct ll_header *) pskb->data;
620 if (!header->next)
621 break;
623 skb_pull(pskb, NETIUCV_HDRLEN);
624 header->next -= offset;
625 offset += header->next;
626 header->next -= NETIUCV_HDRLEN;
627 if (skb_tailroom(pskb) < header->next) {
628 PRINT_WARN("%s: Illegal next field in iucv header: "
629 "%d > %d\n",
630 dev->name, header->next, skb_tailroom(pskb));
631 IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
632 header->next, skb_tailroom(pskb));
633 return;
635 skb_put(pskb, header->next);
636 skb_reset_mac_header(pskb);
637 skb = dev_alloc_skb(pskb->len);
638 if (!skb) {
639 PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
640 dev->name);
641 IUCV_DBF_TEXT(data, 2,
642 "Out of memory in netiucv_unpack_skb\n");
643 privptr->stats.rx_dropped++;
644 return;
646 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
647 pskb->len);
648 skb_reset_mac_header(skb);
649 skb->dev = pskb->dev;
650 skb->protocol = pskb->protocol;
651 pskb->ip_summed = CHECKSUM_UNNECESSARY;
652 privptr->stats.rx_packets++;
653 privptr->stats.rx_bytes += skb->len;
655 * Since receiving is always initiated from a tasklet (in iucv.c),
656 * we must use netif_rx_ni() instead of netif_rx()
658 netif_rx_ni(skb);
659 dev->last_rx = jiffies;
660 skb_pull(pskb, header->next);
661 skb_put(pskb, NETIUCV_HDRLEN);
665 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
667 struct iucv_event *ev = arg;
668 struct iucv_connection *conn = ev->conn;
669 struct iucv_message *msg = ev->data;
670 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
671 int rc;
673 IUCV_DBF_TEXT(trace, 4, __func__);
675 if (!conn->netdev) {
676 iucv_message_reject(conn->path, msg);
677 PRINT_WARN("Received data for unlinked connection\n");
678 IUCV_DBF_TEXT(data, 2,
679 "Received data for unlinked connection\n");
680 return;
682 if (msg->length > conn->max_buffsize) {
683 iucv_message_reject(conn->path, msg);
684 privptr->stats.rx_dropped++;
685 PRINT_WARN("msglen %d > max_buffsize %d\n",
686 msg->length, conn->max_buffsize);
687 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
688 msg->length, conn->max_buffsize);
689 return;
691 conn->rx_buff->data = conn->rx_buff->head;
692 skb_reset_tail_pointer(conn->rx_buff);
693 conn->rx_buff->len = 0;
694 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
695 msg->length, NULL);
696 if (rc || msg->length < 5) {
697 privptr->stats.rx_errors++;
698 PRINT_WARN("iucv_receive returned %08x\n", rc);
699 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
700 return;
702 netiucv_unpack_skb(conn, conn->rx_buff);
705 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
707 struct iucv_event *ev = arg;
708 struct iucv_connection *conn = ev->conn;
709 struct iucv_message *msg = ev->data;
710 struct iucv_message txmsg;
711 struct netiucv_priv *privptr = NULL;
712 u32 single_flag = msg->tag;
713 u32 txbytes = 0;
714 u32 txpackets = 0;
715 u32 stat_maxcq = 0;
716 struct sk_buff *skb;
717 unsigned long saveflags;
718 struct ll_header header;
719 int rc;
721 IUCV_DBF_TEXT(trace, 4, __func__);
723 if (conn && conn->netdev)
724 privptr = netdev_priv(conn->netdev);
725 conn->prof.tx_pending--;
726 if (single_flag) {
727 if ((skb = skb_dequeue(&conn->commit_queue))) {
728 atomic_dec(&skb->users);
729 dev_kfree_skb_any(skb);
730 if (privptr) {
731 privptr->stats.tx_packets++;
732 privptr->stats.tx_bytes +=
733 (skb->len - NETIUCV_HDRLEN
734 - NETIUCV_HDRLEN);
738 conn->tx_buff->data = conn->tx_buff->head;
739 skb_reset_tail_pointer(conn->tx_buff);
740 conn->tx_buff->len = 0;
741 spin_lock_irqsave(&conn->collect_lock, saveflags);
742 while ((skb = skb_dequeue(&conn->collect_queue))) {
743 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
744 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
745 NETIUCV_HDRLEN);
746 skb_copy_from_linear_data(skb,
747 skb_put(conn->tx_buff, skb->len),
748 skb->len);
749 txbytes += skb->len;
750 txpackets++;
751 stat_maxcq++;
752 atomic_dec(&skb->users);
753 dev_kfree_skb_any(skb);
755 if (conn->collect_len > conn->prof.maxmulti)
756 conn->prof.maxmulti = conn->collect_len;
757 conn->collect_len = 0;
758 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
759 if (conn->tx_buff->len == 0) {
760 fsm_newstate(fi, CONN_STATE_IDLE);
761 return;
764 header.next = 0;
765 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
766 conn->prof.send_stamp = current_kernel_time();
767 txmsg.class = 0;
768 txmsg.tag = 0;
769 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
770 conn->tx_buff->data, conn->tx_buff->len);
771 conn->prof.doios_multi++;
772 conn->prof.txlen += conn->tx_buff->len;
773 conn->prof.tx_pending++;
774 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
775 conn->prof.tx_max_pending = conn->prof.tx_pending;
776 if (rc) {
777 conn->prof.tx_pending--;
778 fsm_newstate(fi, CONN_STATE_IDLE);
779 if (privptr)
780 privptr->stats.tx_errors += txpackets;
781 PRINT_WARN("iucv_send returned %08x\n", rc);
782 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
783 } else {
784 if (privptr) {
785 privptr->stats.tx_packets += txpackets;
786 privptr->stats.tx_bytes += txbytes;
788 if (stat_maxcq > conn->prof.maxcqueue)
789 conn->prof.maxcqueue = stat_maxcq;
793 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
795 struct iucv_event *ev = arg;
796 struct iucv_connection *conn = ev->conn;
797 struct iucv_path *path = ev->data;
798 struct net_device *netdev = conn->netdev;
799 struct netiucv_priv *privptr = netdev_priv(netdev);
800 int rc;
802 IUCV_DBF_TEXT(trace, 3, __func__);
804 conn->path = path;
805 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
806 path->flags = 0;
807 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
808 if (rc) {
809 PRINT_WARN("%s: IUCV accept failed with error %d\n",
810 netdev->name, rc);
811 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
812 return;
814 fsm_newstate(fi, CONN_STATE_IDLE);
815 netdev->tx_queue_len = conn->path->msglim;
816 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
819 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
821 struct iucv_event *ev = arg;
822 struct iucv_path *path = ev->data;
824 IUCV_DBF_TEXT(trace, 3, __func__);
825 iucv_path_sever(path, NULL);
828 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
830 struct iucv_connection *conn = arg;
831 struct net_device *netdev = conn->netdev;
832 struct netiucv_priv *privptr = netdev_priv(netdev);
834 IUCV_DBF_TEXT(trace, 3, __func__);
835 fsm_deltimer(&conn->timer);
836 fsm_newstate(fi, CONN_STATE_IDLE);
837 netdev->tx_queue_len = conn->path->msglim;
838 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
841 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
843 struct iucv_connection *conn = arg;
845 IUCV_DBF_TEXT(trace, 3, __func__);
846 fsm_deltimer(&conn->timer);
847 iucv_path_sever(conn->path, NULL);
848 fsm_newstate(fi, CONN_STATE_STARTWAIT);
851 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
853 struct iucv_connection *conn = arg;
854 struct net_device *netdev = conn->netdev;
855 struct netiucv_priv *privptr = netdev_priv(netdev);
857 IUCV_DBF_TEXT(trace, 3, __func__);
859 fsm_deltimer(&conn->timer);
860 iucv_path_sever(conn->path, NULL);
861 PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
862 IUCV_DBF_TEXT(data, 2,
863 "conn_action_connsever: Remote dropped connection\n");
864 fsm_newstate(fi, CONN_STATE_STARTWAIT);
865 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
868 static void conn_action_start(fsm_instance *fi, int event, void *arg)
870 struct iucv_connection *conn = arg;
871 int rc;
873 IUCV_DBF_TEXT(trace, 3, __func__);
875 fsm_newstate(fi, CONN_STATE_STARTWAIT);
876 PRINT_DEBUG("%s('%s'): connecting ...\n",
877 conn->netdev->name, conn->userid);
880 * We must set the state before calling iucv_connect because the
881 * callback handler could be called at any point after the connection
882 * request is sent
885 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
886 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
887 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
888 NULL, iucvMagic, conn);
889 switch (rc) {
890 case 0:
891 conn->netdev->tx_queue_len = conn->path->msglim;
892 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
893 CONN_EVENT_TIMER, conn);
894 return;
895 case 11:
896 PRINT_INFO("%s: User %s is currently not available.\n",
897 conn->netdev->name,
898 netiucv_printname(conn->userid));
899 fsm_newstate(fi, CONN_STATE_STARTWAIT);
900 break;
901 case 12:
902 PRINT_INFO("%s: User %s is currently not ready.\n",
903 conn->netdev->name,
904 netiucv_printname(conn->userid));
905 fsm_newstate(fi, CONN_STATE_STARTWAIT);
906 break;
907 case 13:
908 PRINT_WARN("%s: Too many IUCV connections.\n",
909 conn->netdev->name);
910 fsm_newstate(fi, CONN_STATE_CONNERR);
911 break;
912 case 14:
913 PRINT_WARN("%s: User %s has too many IUCV connections.\n",
914 conn->netdev->name,
915 netiucv_printname(conn->userid));
916 fsm_newstate(fi, CONN_STATE_CONNERR);
917 break;
918 case 15:
919 PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
920 conn->netdev->name);
921 fsm_newstate(fi, CONN_STATE_CONNERR);
922 break;
923 default:
924 PRINT_WARN("%s: iucv_connect returned error %d\n",
925 conn->netdev->name, rc);
926 fsm_newstate(fi, CONN_STATE_CONNERR);
927 break;
929 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
930 kfree(conn->path);
931 conn->path = NULL;
934 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
936 struct sk_buff *skb;
938 while ((skb = skb_dequeue(q))) {
939 atomic_dec(&skb->users);
940 dev_kfree_skb_any(skb);
944 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
946 struct iucv_event *ev = arg;
947 struct iucv_connection *conn = ev->conn;
948 struct net_device *netdev = conn->netdev;
949 struct netiucv_priv *privptr = netdev_priv(netdev);
951 IUCV_DBF_TEXT(trace, 3, __func__);
953 fsm_deltimer(&conn->timer);
954 fsm_newstate(fi, CONN_STATE_STOPPED);
955 netiucv_purge_skb_queue(&conn->collect_queue);
956 if (conn->path) {
957 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
958 iucv_path_sever(conn->path, iucvMagic);
959 kfree(conn->path);
960 conn->path = NULL;
962 netiucv_purge_skb_queue(&conn->commit_queue);
963 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
966 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
968 struct iucv_connection *conn = arg;
969 struct net_device *netdev = conn->netdev;
971 PRINT_WARN("%s: Cannot connect without username\n", netdev->name);
972 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
975 static const fsm_node conn_fsm[] = {
976 { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
977 { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
979 { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
980 { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
981 { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
982 { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
983 { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
984 { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
985 { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
987 { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
988 { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
989 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
990 { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
991 { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
993 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
994 { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
996 { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
997 { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
998 { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
1000 { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
1001 { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
1003 { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
1004 { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
1007 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
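/*
 * Editorial note: conn_fsm is the transition table of the connection
 * state machine.  Each entry maps a (state, event) pair to the action
 * routine that fsm_event() dispatches when that event arrives in that
 * state; pairs without an entry cause no action routine to be called.
 * The table and CONN_FSM_LEN are handed to init_fsm() in
 * netiucv_new_connection() below.
 */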
1011 * Actions for interface - statemachine.
1015 * dev_action_start
1016 * @fi: An instance of an interface statemachine.
1017 * @event: The event, just happened.
1018 * @arg: Generic pointer, casted from struct net_device * upon call.
1020 * Startup connection by sending CONN_EVENT_START to it.
1022 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1024 struct net_device *dev = arg;
1025 struct netiucv_priv *privptr = netdev_priv(dev);
1027 IUCV_DBF_TEXT(trace, 3, __func__);
1029 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1030 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1034 * Shutdown connection by sending CONN_EVENT_STOP to it.
1036 * @param fi An instance of an interface statemachine.
1037 * @param event The event, just happened.
1038 * @param arg Generic pointer, casted from struct net_device * upon call.
1040 static void
1041 dev_action_stop(fsm_instance *fi, int event, void *arg)
1043 struct net_device *dev = arg;
1044 struct netiucv_priv *privptr = netdev_priv(dev);
1045 struct iucv_event ev;
1047 IUCV_DBF_TEXT(trace, 3, __func__);
1049 ev.conn = privptr->conn;
1051 fsm_newstate(fi, DEV_STATE_STOPWAIT);
1052 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1056 * Called from connection statemachine
1057 * when a connection is up and running.
1059 * @param fi An instance of an interface statemachine.
1060 * @param event The event, just happened.
1061 * @param arg Generic pointer, casted from struct net_device * upon call.
1063 static void
1064 dev_action_connup(fsm_instance *fi, int event, void *arg)
1066 struct net_device *dev = arg;
1067 struct netiucv_priv *privptr = netdev_priv(dev);
1069 IUCV_DBF_TEXT(trace, 3, __func__);
1071 switch (fsm_getstate(fi)) {
1072 case DEV_STATE_STARTWAIT:
1073 fsm_newstate(fi, DEV_STATE_RUNNING);
1074 PRINT_INFO("%s: connected with remote side %s\n",
1075 dev->name, privptr->conn->userid);
1076 IUCV_DBF_TEXT(setup, 3,
1077 "connection is up and running\n");
1078 break;
1079 case DEV_STATE_STOPWAIT:
1080 PRINT_INFO(
1081 "%s: got connection UP event during shutdown!\n",
1082 dev->name);
1083 IUCV_DBF_TEXT(data, 2,
1084 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1085 break;
1090 * Called from connection statemachine
1091 * when a connection has been shutdown.
1093 * @param fi An instance of an interface statemachine.
1094 * @param event The event, just happened.
1095 * @param arg Generic pointer, casted from struct net_device * upon call.
1097 static void
1098 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1100 IUCV_DBF_TEXT(trace, 3, __func__);
1102 switch (fsm_getstate(fi)) {
1103 case DEV_STATE_RUNNING:
1104 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1105 break;
1106 case DEV_STATE_STOPWAIT:
1107 fsm_newstate(fi, DEV_STATE_STOPPED);
1108 IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1109 break;
1113 static const fsm_node dev_fsm[] = {
1114 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
1116 { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
1117 { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
1119 { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
1120 { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
1122 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1123 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
1124 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
1127 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1130 * Transmit a packet.
1131 * This is a helper function for netiucv_tx().
1133 * @param conn Connection to be used for sending.
1134 * @param skb Pointer to struct sk_buff of packet to send.
1135 * The linklevel header has already been set up
1136 * by netiucv_tx().
1138 * @return 0 on success, -ERRNO on failure. (Never fails.)
1140 static int netiucv_transmit_skb(struct iucv_connection *conn,
1141 struct sk_buff *skb)
1143 struct iucv_message msg;
1144 unsigned long saveflags;
1145 struct ll_header header;
1146 int rc;
1148 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1149 int l = skb->len + NETIUCV_HDRLEN;
1151 spin_lock_irqsave(&conn->collect_lock, saveflags);
1152 if (conn->collect_len + l >
1153 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1154 rc = -EBUSY;
1155 IUCV_DBF_TEXT(data, 2,
1156 "EBUSY from netiucv_transmit_skb\n");
1157 } else {
1158 atomic_inc(&skb->users);
1159 skb_queue_tail(&conn->collect_queue, skb);
1160 conn->collect_len += l;
1161 rc = 0;
1163 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1164 } else {
1165 struct sk_buff *nskb = skb;
1167 * Copy the skb to a new allocated skb in lowmem only if the
1168 * data is located above 2G in memory or tailroom is < 2.
1170 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1171 NETIUCV_HDRLEN)) >> 31;
1172 int copied = 0;
1173 if (hi || (skb_tailroom(skb) < 2)) {
1174 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1175 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1176 if (!nskb) {
1177 PRINT_WARN("%s: Could not allocate tx_skb\n",
1178 conn->netdev->name);
1179 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1180 rc = -ENOMEM;
1181 return rc;
1182 } else {
1183 skb_reserve(nskb, NETIUCV_HDRLEN);
1184 memcpy(skb_put(nskb, skb->len),
1185 skb->data, skb->len);
1187 copied = 1;
1190 * skb now is below 2G and has enough room. Add headers.
1192 header.next = nskb->len + NETIUCV_HDRLEN;
1193 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1194 header.next = 0;
1195 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1197 fsm_newstate(conn->fsm, CONN_STATE_TX);
1198 conn->prof.send_stamp = current_kernel_time();
1200 msg.tag = 1;
1201 msg.class = 0;
1202 rc = iucv_message_send(conn->path, &msg, 0, 0,
1203 nskb->data, nskb->len);
1204 conn->prof.doios_single++;
1205 conn->prof.txlen += skb->len;
1206 conn->prof.tx_pending++;
1207 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1208 conn->prof.tx_max_pending = conn->prof.tx_pending;
1209 if (rc) {
1210 struct netiucv_priv *privptr;
1211 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1212 conn->prof.tx_pending--;
1213 privptr = netdev_priv(conn->netdev);
1214 if (privptr)
1215 privptr->stats.tx_errors++;
1216 if (copied)
1217 dev_kfree_skb(nskb);
1218 else {
1220 * Remove our headers. They get added
1221 * again on retransmit.
1223 skb_pull(skb, NETIUCV_HDRLEN);
1224 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1226 PRINT_WARN("iucv_send returned %08x\n", rc);
1227 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1228 } else {
1229 if (copied)
1230 dev_kfree_skb(skb);
1231 atomic_inc(&nskb->users);
1232 skb_queue_tail(&conn->commit_queue, nskb);
1236 return rc;
1240 * Interface API for upper network layers
1244 * Open an interface.
1245 * Called from generic network layer when ifconfig up is run.
1247 * @param dev Pointer to interface struct.
1249 * @return 0 on success, -ERRNO on failure. (Never fails.)
1251 static int netiucv_open(struct net_device *dev)
1253 struct netiucv_priv *priv = netdev_priv(dev);
1255 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1256 return 0;
1260 * Close an interface.
1261 * Called from generic network layer when ifconfig down is run.
1263 * @param dev Pointer to interface struct.
1265 * @return 0 on success, -ERRNO on failure. (Never fails.)
1267 static int netiucv_close(struct net_device *dev)
1269 struct netiucv_priv *priv = netdev_priv(dev);
1271 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1272 return 0;
1276 * Start transmission of a packet.
1277 * Called from generic network device layer.
1279 * @param skb Pointer to buffer containing the packet.
1280 * @param dev Pointer to interface struct.
1282 * @return 0 if packet consumed, !0 if packet rejected.
1283 * Note: If we return !0, then the packet is free'd by
1284 * the generic network layer.
1286 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1288 struct netiucv_priv *privptr = netdev_priv(dev);
1289 int rc;
1291 IUCV_DBF_TEXT(trace, 4, __func__);
1293 * Some sanity checks ...
1295 if (skb == NULL) {
1296 PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1297 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1298 privptr->stats.tx_dropped++;
1299 return 0;
1301 if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1302 PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1303 dev->name, NETIUCV_HDRLEN);
1304 IUCV_DBF_TEXT(data, 2,
1305 "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1306 dev_kfree_skb(skb);
1307 privptr->stats.tx_dropped++;
1308 return 0;
1312 * If connection is not running, try to restart it
1313 * and throw away packet.
1315 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1316 dev_kfree_skb(skb);
1317 privptr->stats.tx_dropped++;
1318 privptr->stats.tx_errors++;
1319 privptr->stats.tx_carrier_errors++;
1320 return 0;
1323 if (netiucv_test_and_set_busy(dev)) {
1324 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1325 return -EBUSY;
1327 dev->trans_start = jiffies;
1328 rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
1329 netiucv_clear_busy(dev);
1330 return rc;
1334 * netiucv_stats
1335 * @dev: Pointer to interface struct.
1337 * Returns interface statistics of a device.
1339 * Returns pointer to stats struct of this interface.
1341 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1343 struct netiucv_priv *priv = netdev_priv(dev);
1345 IUCV_DBF_TEXT(trace, 5, __func__);
1346 return &priv->stats;
1350 * netiucv_change_mtu
1351 * @dev: Pointer to interface struct.
1352 * @new_mtu: The new MTU to use for this interface.
1354 * Sets MTU of an interface.
1356 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1357 * (valid range is 576 .. NETIUCV_MTU_MAX).
1359 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1361 IUCV_DBF_TEXT(trace, 3, __func__);
1362 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1363 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1364 return -EINVAL;
1366 dev->mtu = new_mtu;
1367 return 0;
1371 * attributes in sysfs
1374 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1375 char *buf)
1377 struct netiucv_priv *priv = dev->driver_data;
1379 IUCV_DBF_TEXT(trace, 5, __func__);
1380 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1383 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1384 const char *buf, size_t count)
1386 struct netiucv_priv *priv = dev->driver_data;
1387 struct net_device *ndev = priv->conn->netdev;
1388 char *p;
1389 char *tmp;
1390 char username[9];
1391 int i;
1392 struct iucv_connection *cp;
1394 IUCV_DBF_TEXT(trace, 3, __func__);
1395 if (count > 9) {
1396 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1397 IUCV_DBF_TEXT_(setup, 2,
1398 "%d is length of username\n", (int) count);
1399 return -EINVAL;
1402 tmp = strsep((char **) &buf, "\n");
1403 for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1404 if (isalnum(*p) || (*p == '$')) {
1405 username[i]= toupper(*p);
1406 continue;
1408 if (*p == '\n') {
1409 /* trailing lf, grr */
1410 break;
1412 PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
1413 IUCV_DBF_TEXT_(setup, 2,
1414 "username: invalid character %c\n", *p);
1415 return -EINVAL;
1417 while (i < 8)
1418 username[i++] = ' ';
1419 username[8] = '\0';
1421 if (memcmp(username, priv->conn->userid, 9) &&
1422 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1423 /* username changed while the interface is active. */
1424 PRINT_WARN("netiucv: device %s active, connected to %s\n",
1425 dev->bus_id, priv->conn->userid);
1426 PRINT_WARN("netiucv: user cannot be updated\n");
1427 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1428 return -EBUSY;
1430 read_lock_bh(&iucv_connection_rwlock);
1431 list_for_each_entry(cp, &iucv_connection_list, list) {
1432 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1433 read_unlock_bh(&iucv_connection_rwlock);
1434 PRINT_WARN("netiucv: Connection to %s already "
1435 "exists\n", username);
1436 return -EEXIST;
1439 read_unlock_bh(&iucv_connection_rwlock);
1440 memcpy(priv->conn->userid, username, 9);
1441 return count;
1444 static DEVICE_ATTR(user, 0644, user_show, user_write);
1446 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1447 char *buf)
1448 { struct netiucv_priv *priv = dev->driver_data;
1450 IUCV_DBF_TEXT(trace, 5, __func__);
1451 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1454 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1455 const char *buf, size_t count)
1457 struct netiucv_priv *priv = dev->driver_data;
1458 struct net_device *ndev = priv->conn->netdev;
1459 char *e;
1460 int bs1;
1462 IUCV_DBF_TEXT(trace, 3, __func__);
1463 if (count >= 39)
1464 return -EINVAL;
1466 bs1 = simple_strtoul(buf, &e, 0);
1468 if (e && (!isspace(*e))) {
1469 PRINT_WARN("netiucv: Invalid character in buffer!\n");
1470 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1471 return -EINVAL;
1473 if (bs1 > NETIUCV_BUFSIZE_MAX) {
1474 PRINT_WARN("netiucv: Given buffer size %d too large.\n",
1475 bs1);
1476 IUCV_DBF_TEXT_(setup, 2,
1477 "buffer_write: buffer size %d too large\n",
1478 bs1);
1479 return -EINVAL;
1481 if ((ndev->flags & IFF_RUNNING) &&
1482 (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1483 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1484 bs1);
1485 IUCV_DBF_TEXT_(setup, 2,
1486 "buffer_write: buffer size %d too small\n",
1487 bs1);
1488 return -EINVAL;
1490 if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1491 PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1492 bs1);
1493 IUCV_DBF_TEXT_(setup, 2,
1494 "buffer_write: buffer size %d too small\n",
1495 bs1);
1496 return -EINVAL;
1499 priv->conn->max_buffsize = bs1;
1500 if (!(ndev->flags & IFF_RUNNING))
1501 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1503 return count;
1507 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1509 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1510 char *buf)
1512 struct netiucv_priv *priv = dev->driver_data;
1514 IUCV_DBF_TEXT(trace, 5, __func__);
1515 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1518 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1520 static ssize_t conn_fsm_show (struct device *dev,
1521 struct device_attribute *attr, char *buf)
1523 struct netiucv_priv *priv = dev->driver_data;
1525 IUCV_DBF_TEXT(trace, 5, __func__);
1526 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1529 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1531 static ssize_t maxmulti_show (struct device *dev,
1532 struct device_attribute *attr, char *buf)
1534 struct netiucv_priv *priv = dev->driver_data;
1536 IUCV_DBF_TEXT(trace, 5, __func__);
1537 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1540 static ssize_t maxmulti_write (struct device *dev,
1541 struct device_attribute *attr,
1542 const char *buf, size_t count)
1544 struct netiucv_priv *priv = dev->driver_data;
1546 IUCV_DBF_TEXT(trace, 4, __func__);
1547 priv->conn->prof.maxmulti = 0;
1548 return count;
1551 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1553 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1554 char *buf)
1556 struct netiucv_priv *priv = dev->driver_data;
1558 IUCV_DBF_TEXT(trace, 5, __func__);
1559 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1562 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1563 const char *buf, size_t count)
1565 struct netiucv_priv *priv = dev->driver_data;
1567 IUCV_DBF_TEXT(trace, 4, __func__);
1568 priv->conn->prof.maxcqueue = 0;
1569 return count;
1572 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1574 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1575 char *buf)
1577 struct netiucv_priv *priv = dev->driver_data;
1579 IUCV_DBF_TEXT(trace, 5, __func__);
1580 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1583 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1584 const char *buf, size_t count)
1586 struct netiucv_priv *priv = dev->driver_data;
1588 IUCV_DBF_TEXT(trace, 4, __func__);
1589 priv->conn->prof.doios_single = 0;
1590 return count;
1593 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1595 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1596 char *buf)
1598 struct netiucv_priv *priv = dev->driver_data;
1600 IUCV_DBF_TEXT(trace, 5, __func__);
1601 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1604 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1605 const char *buf, size_t count)
1607 struct netiucv_priv *priv = dev->driver_data;
1609 IUCV_DBF_TEXT(trace, 5, __func__);
1610 priv->conn->prof.doios_multi = 0;
1611 return count;
1614 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1616 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1617 char *buf)
1619 struct netiucv_priv *priv = dev->driver_data;
1621 IUCV_DBF_TEXT(trace, 5, __func__);
1622 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1625 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1626 const char *buf, size_t count)
1628 struct netiucv_priv *priv = dev->driver_data;
1630 IUCV_DBF_TEXT(trace, 4, __func__);
1631 priv->conn->prof.txlen = 0;
1632 return count;
1635 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1637 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1638 char *buf)
1640 struct netiucv_priv *priv = dev->driver_data;
1642 IUCV_DBF_TEXT(trace, 5, __func__);
1643 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1646 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1647 const char *buf, size_t count)
1649 struct netiucv_priv *priv = dev->driver_data;
1651 IUCV_DBF_TEXT(trace, 4, __func__);
1652 priv->conn->prof.tx_time = 0;
1653 return count;
1656 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1658 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1659 char *buf)
1661 struct netiucv_priv *priv = dev->driver_data;
1663 IUCV_DBF_TEXT(trace, 5, __func__);
1664 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1667 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1668 const char *buf, size_t count)
1670 struct netiucv_priv *priv = dev->driver_data;
1672 IUCV_DBF_TEXT(trace, 4, __func__);
1673 priv->conn->prof.tx_pending = 0;
1674 return count;
1677 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1679 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1680 char *buf)
1682 struct netiucv_priv *priv = dev->driver_data;
1684 IUCV_DBF_TEXT(trace, 5, __func__);
1685 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1688 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1689 const char *buf, size_t count)
1691 struct netiucv_priv *priv = dev->driver_data;
1693 IUCV_DBF_TEXT(trace, 4, __func__);
1694 priv->conn->prof.tx_max_pending = 0;
1695 return count;
1698 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1700 static struct attribute *netiucv_attrs[] = {
1701 &dev_attr_buffer.attr,
1702 &dev_attr_user.attr,
1703 NULL,
1706 static struct attribute_group netiucv_attr_group = {
1707 .attrs = netiucv_attrs,
1710 static struct attribute *netiucv_stat_attrs[] = {
1711 &dev_attr_device_fsm_state.attr,
1712 &dev_attr_connection_fsm_state.attr,
1713 &dev_attr_max_tx_buffer_used.attr,
1714 &dev_attr_max_chained_skbs.attr,
1715 &dev_attr_tx_single_write_ops.attr,
1716 &dev_attr_tx_multi_write_ops.attr,
1717 &dev_attr_netto_bytes.attr,
1718 &dev_attr_max_tx_io_time.attr,
1719 &dev_attr_tx_pending.attr,
1720 &dev_attr_tx_max_pending.attr,
1721 NULL,
1724 static struct attribute_group netiucv_stat_attr_group = {
1725 .name = "stats",
1726 .attrs = netiucv_stat_attrs,
1729 static int netiucv_add_files(struct device *dev)
1731 int ret;
1733 IUCV_DBF_TEXT(trace, 3, __func__);
1734 ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1735 if (ret)
1736 return ret;
1737 ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1738 if (ret)
1739 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1740 return ret;
1743 static void netiucv_remove_files(struct device *dev)
1745 IUCV_DBF_TEXT(trace, 3, __func__);
1746 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1747 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1750 static int netiucv_register_device(struct net_device *ndev)
1752 struct netiucv_priv *priv = netdev_priv(ndev);
1753 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1754 int ret;
1757 IUCV_DBF_TEXT(trace, 3, __func__);
1759 if (dev) {
1760 snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
1761 dev->bus = &iucv_bus;
1762 dev->parent = iucv_root;
1764 * The release function could be called after the
1765 * module has been unloaded. It's _only_ task is to
1766 * free the struct. Therefore, we specify kfree()
1767 * directly here. (Probably a little bit obfuscating
1768 * but legitime ...).
1770 dev->release = (void (*)(struct device *))kfree;
1771 dev->driver = &netiucv_driver;
1772 } else
1773 return -ENOMEM;
1775 ret = device_register(dev);
1777 if (ret)
1778 return ret;
1779 ret = netiucv_add_files(dev);
1780 if (ret)
1781 goto out_unreg;
1782 priv->dev = dev;
1783 dev->driver_data = priv;
1784 return 0;
1786 out_unreg:
1787 device_unregister(dev);
1788 return ret;
1791 static void netiucv_unregister_device(struct device *dev)
1793 IUCV_DBF_TEXT(trace, 3, __func__);
1794 netiucv_remove_files(dev);
1795 device_unregister(dev);
1799 * Allocate and initialize a new connection structure.
1800 * Add it to the list of netiucv connections;
1802 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1803 char *username)
1805 struct iucv_connection *conn;
1807 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1808 if (!conn)
1809 goto out;
1810 skb_queue_head_init(&conn->collect_queue);
1811 skb_queue_head_init(&conn->commit_queue);
1812 spin_lock_init(&conn->collect_lock);
1813 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1814 conn->netdev = dev;
1816 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1817 if (!conn->rx_buff)
1818 goto out_conn;
1819 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1820 if (!conn->tx_buff)
1821 goto out_rx;
1822 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1823 conn_event_names, NR_CONN_STATES,
1824 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1825 GFP_KERNEL);
1826 if (!conn->fsm)
1827 goto out_tx;
1829 fsm_settimer(conn->fsm, &conn->timer);
1830 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1832 if (username) {
1833 memcpy(conn->userid, username, 9);
1834 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1837 write_lock_bh(&iucv_connection_rwlock);
1838 list_add_tail(&conn->list, &iucv_connection_list);
1839 write_unlock_bh(&iucv_connection_rwlock);
1840 return conn;
1842 out_tx:
1843 kfree_skb(conn->tx_buff);
1844 out_rx:
1845 kfree_skb(conn->rx_buff);
1846 out_conn:
1847 kfree(conn);
1848 out:
1849 return NULL;
1853 * Release a connection structure and remove it from the
1854 * list of netiucv connections.
1856 static void netiucv_remove_connection(struct iucv_connection *conn)
1858 IUCV_DBF_TEXT(trace, 3, __func__);
1859 write_lock_bh(&iucv_connection_rwlock);
1860 list_del_init(&conn->list);
1861 write_unlock_bh(&iucv_connection_rwlock);
1862 fsm_deltimer(&conn->timer);
1863 netiucv_purge_skb_queue(&conn->collect_queue);
1864 if (conn->path) {
1865 iucv_path_sever(conn->path, iucvMagic);
1866 kfree(conn->path);
1867 conn->path = NULL;
1869 netiucv_purge_skb_queue(&conn->commit_queue);
1870 kfree_fsm(conn->fsm);
1871 kfree_skb(conn->rx_buff);
1872 kfree_skb(conn->tx_buff);
1876 * Release everything of a net device.
1878 static void netiucv_free_netdevice(struct net_device *dev)
1880 struct netiucv_priv *privptr = netdev_priv(dev);
1882 IUCV_DBF_TEXT(trace, 3, __func__);
1884 if (!dev)
1885 return;
1887 if (privptr) {
1888 if (privptr->conn)
1889 netiucv_remove_connection(privptr->conn);
1890 if (privptr->fsm)
1891 kfree_fsm(privptr->fsm);
1892 privptr->conn = NULL; privptr->fsm = NULL;
1893 /* privptr gets freed by free_netdev() */
1895 free_netdev(dev);
1899 * Initialize a net device. (Called from kernel in alloc_netdev())
1901 static void netiucv_setup_netdevice(struct net_device *dev)
1903 dev->mtu = NETIUCV_MTU_DEFAULT;
1904 dev->hard_start_xmit = netiucv_tx;
1905 dev->open = netiucv_open;
1906 dev->stop = netiucv_close;
1907 dev->get_stats = netiucv_stats;
1908 dev->change_mtu = netiucv_change_mtu;
1909 dev->destructor = netiucv_free_netdevice;
1910 dev->hard_header_len = NETIUCV_HDRLEN;
1911 dev->addr_len = 0;
1912 dev->type = ARPHRD_SLIP;
1913 dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
1914 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1918 * Allocate and initialize everything of a net device.
1920 static struct net_device *netiucv_init_netdevice(char *username)
1922 struct netiucv_priv *privptr;
1923 struct net_device *dev;
1925 dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1926 netiucv_setup_netdevice);
1927 if (!dev)
1928 return NULL;
1929 if (dev_alloc_name(dev, dev->name) < 0)
1930 goto out_netdev;
1932 privptr = netdev_priv(dev);
1933 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1934 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1935 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1936 if (!privptr->fsm)
1937 goto out_netdev;
1939 privptr->conn = netiucv_new_connection(dev, username);
1940 if (!privptr->conn) {
1941 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1942 goto out_fsm;
1944 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1945 return dev;
1947 out_fsm:
1948 kfree_fsm(privptr->fsm);
1949 out_netdev:
1950 free_netdev(dev);
1951 return NULL;
1954 static ssize_t conn_write(struct device_driver *drv,
1955 const char *buf, size_t count)
1957 const char *p;
1958 char username[9];
1959 int i, rc;
1960 struct net_device *dev;
1961 struct netiucv_priv *priv;
1962 struct iucv_connection *cp;
1964 IUCV_DBF_TEXT(trace, 3, __func__);
1965 if (count>9) {
1966 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1967 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1968 return -EINVAL;
1971 for (i = 0, p = buf; i < 8 && *p; i++, p++) {
1972 if (isalnum(*p) || *p == '$') {
1973 username[i] = toupper(*p);
1974 continue;
1976 if (*p == '\n')
1977 /* trailing lf, grr */
1978 break;
1979 PRINT_WARN("netiucv: Invalid character in username!\n");
1980 IUCV_DBF_TEXT_(setup, 2,
1981 "conn_write: invalid character %c\n", *p);
1982 return -EINVAL;
1984 while (i < 8)
1985 username[i++] = ' ';
1986 username[8] = '\0';
1988 read_lock_bh(&iucv_connection_rwlock);
1989 list_for_each_entry(cp, &iucv_connection_list, list) {
1990 if (!strncmp(username, cp->userid, 9)) {
1991 read_unlock_bh(&iucv_connection_rwlock);
1992 PRINT_WARN("netiucv: Connection to %s already "
1993 "exists\n", username);
1994 return -EEXIST;
1997 read_unlock_bh(&iucv_connection_rwlock);
1999 dev = netiucv_init_netdevice(username);
2000 if (!dev) {
2001 PRINT_WARN("netiucv: Could not allocate network device "
2002 "structure for user '%s'\n",
2003 netiucv_printname(username));
2004 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2005 return -ENODEV;
2008 rc = netiucv_register_device(dev);
2009 if (rc) {
2010 IUCV_DBF_TEXT_(setup, 2,
2011 "ret %d from netiucv_register_device\n", rc);
2012 goto out_free_ndev;
2015 /* sysfs magic */
2016 priv = netdev_priv(dev);
2017 SET_NETDEV_DEV(dev, priv->dev);
2019 rc = register_netdev(dev);
2020 if (rc)
2021 goto out_unreg;
2023 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2025 return count;
2027 out_unreg:
2028 netiucv_unregister_device(priv->dev);
2029 out_free_ndev:
2030 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2031 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2032 netiucv_free_netdevice(dev);
2033 return rc;
2036 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2038 static ssize_t remove_write (struct device_driver *drv,
2039 const char *buf, size_t count)
2041 struct iucv_connection *cp;
2042 struct net_device *ndev;
2043 struct netiucv_priv *priv;
2044 struct device *dev;
2045 char name[IFNAMSIZ];
2046 const char *p;
2047 int i;
2049 IUCV_DBF_TEXT(trace, 3, __func__);
2051 if (count >= IFNAMSIZ)
2052 count = IFNAMSIZ - 1;;
2054 for (i = 0, p = buf; i < count && *p; i++, p++) {
2055 if (*p == '\n' || *p == ' ')
2056 /* trailing lf, grr */
2057 break;
2058 name[i] = *p;
2060 name[i] = '\0';
2062 read_lock_bh(&iucv_connection_rwlock);
2063 list_for_each_entry(cp, &iucv_connection_list, list) {
2064 ndev = cp->netdev;
2065 priv = netdev_priv(ndev);
2066 dev = priv->dev;
2067 if (strncmp(name, ndev->name, count))
2068 continue;
2069 read_unlock_bh(&iucv_connection_rwlock);
2070 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2071 PRINT_WARN("netiucv: net device %s active with peer "
2072 "%s\n", ndev->name, priv->conn->userid);
2073 PRINT_WARN("netiucv: %s cannot be removed\n",
2074 ndev->name);
2075 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2076 return -EBUSY;
2078 unregister_netdev(ndev);
2079 netiucv_unregister_device(dev);
2080 return count;
2082 read_unlock_bh(&iucv_connection_rwlock);
2083 PRINT_WARN("netiucv: net device %s unknown\n", name);
2084 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2085 return -EINVAL;
2088 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
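/*
 * Editorial note -- illustrative use of these driver attributes from
 * user space (sysfs paths assumed from the bus and driver names used
 * in this file):
 *
 *   # create a new iucv<n> interface peering with z/VM guest TESTVM
 *   echo TESTVM > /sys/bus/iucv/drivers/netiucv/connection
 *
 *   # remove an interface again (it must be down)
 *   echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 *
 * The per-interface "user" and "buffer" attributes and the "stats"
 * group defined above appear on the corresponding netiucv device.
 */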
2090 static struct attribute * netiucv_drv_attrs[] = {
2091 &driver_attr_connection.attr,
2092 &driver_attr_remove.attr,
2093 NULL,
2096 static struct attribute_group netiucv_drv_attr_group = {
2097 .attrs = netiucv_drv_attrs,
2100 static struct attribute_group *netiucv_drv_attr_groups[] = {
2101 &netiucv_drv_attr_group,
2102 NULL,
2105 static void netiucv_banner(void)
2107 PRINT_INFO("NETIUCV driver initialized\n");
2110 static void __exit netiucv_exit(void)
2112 struct iucv_connection *cp;
2113 struct net_device *ndev;
2114 struct netiucv_priv *priv;
2115 struct device *dev;
2117 IUCV_DBF_TEXT(trace, 3, __func__);
2118 while (!list_empty(&iucv_connection_list)) {
2119 cp = list_entry(iucv_connection_list.next,
2120 struct iucv_connection, list);
2121 ndev = cp->netdev;
2122 priv = netdev_priv(ndev);
2123 dev = priv->dev;
2125 unregister_netdev(ndev);
2126 netiucv_unregister_device(dev);
2129 driver_unregister(&netiucv_driver);
2130 iucv_unregister(&netiucv_handler, 1);
2131 iucv_unregister_dbf_views();
2133 PRINT_INFO("NETIUCV driver unloaded\n");
2134 return;
2137 static int __init netiucv_init(void)
2139 int rc;
2141 rc = iucv_register_dbf_views();
2142 if (rc)
2143 goto out;
2144 rc = iucv_register(&netiucv_handler, 1);
2145 if (rc)
2146 goto out_dbf;
2147 IUCV_DBF_TEXT(trace, 3, __func__);
2148 netiucv_driver.groups = netiucv_drv_attr_groups;
2149 rc = driver_register(&netiucv_driver);
2150 if (rc) {
2151 PRINT_ERR("NETIUCV: failed to register driver.\n");
2152 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2153 goto out_iucv;
2156 netiucv_banner();
2157 return rc;
2159 out_iucv:
2160 iucv_unregister(&netiucv_handler, 1);
2161 out_dbf:
2162 iucv_unregister_dbf_views();
2163 out:
2164 return rc;
2167 module_init(netiucv_init);
2168 module_exit(netiucv_exit);
2169 MODULE_LICENSE("GPL");