1 /*
2 * drivers/s390/net/claw.c
3 * ESCON CLAW network driver
5 * Linux for zSeries version
6 * Copyright IBM Corp. 2002, 2009
7 * Author(s) Original code written by:
8 * Kazuo Iimura <iimura@jp.ibm.com>
9 * Rewritten by
10 * Andy Richter <richtera@us.ibm.com>
11 * Marc Price <mwprice@us.ibm.com>
13 * sysfs parms:
14 * group x.x.rrrr,x.x.wwww
15 * read_buffer nnnnnnn
16 * write_buffer nnnnnn
17 * host_name aaaaaaaa
18 * adapter_name aaaaaaaa
19 * api_type aaaaaaaa
21 * eg.
22 * group 0.0.0200 0.0.0201
23 * read_buffer 25
24 * write_buffer 20
25 * host_name LINUX390
26 * adapter_name RS6K
27 * api_type TCPIP
29 * where
31 * The device id is decided by the order in which entries
32 * are added to the group: the first is claw0, the second claw1,
33 * and so on up to CLAW_MAX_DEV
35 * rrrr - the first of 2 consecutive device addresses used for the
36 * CLAW protocol.
37 * The specified address is always used as the input (Read)
38 * channel and the next address is used as the output channel.
40 * wwww - the second of 2 consecutive device addresses used for
41 * the CLAW protocol.
42 * The specified address is always used as the output
43 * channel and the previous address is used as the input channel.
45 * read_buffer - specifies number of input buffers to allocate.
46 * write_buffer - specifies number of output buffers to allocate.
47 * host_name - host name
48 * adapter_name - adapter name
49 * api_type - API type, TCPIP or API, that will be sent and expected
50 * as the ws_name
52 * Note the following requirements:
53 * 1) host_name must match the configured adapter_name on the remote side
54 * 2) adapter_name must match the configured host_name on the remote side
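 *
 * As a rough illustration (assuming the usual ccwgroup sysfs layout,
 * which may differ by distribution), the example values above could be
 * set with something like:
 *
 *   echo 0.0.0200,0.0.0201 > /sys/bus/ccwgroup/drivers/claw/group
 *   cd /sys/bus/ccwgroup/drivers/claw/0.0.0200
 *   echo 25       > read_buffer
 *   echo 20       > write_buffer
 *   echo LINUX390 > host_name
 *   echo RS6K     > adapter_name
 *   echo TCPIP    > api_type
 *   echo 1        > online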
56 * Change History
57 * 1.00 Initial release shipped
58 * 1.10 Changes for Buffer allocation
59 * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower
60 * 1.25 Added Packing support
61 * 1.5
64 #define KMSG_COMPONENT "claw"
66 #include <asm/ccwdev.h>
67 #include <asm/ccwgroup.h>
68 #include <asm/debug.h>
69 #include <asm/idals.h>
70 #include <asm/io.h>
71 #include <linux/bitops.h>
72 #include <linux/ctype.h>
73 #include <linux/delay.h>
74 #include <linux/errno.h>
75 #include <linux/if_arp.h>
76 #include <linux/init.h>
77 #include <linux/interrupt.h>
78 #include <linux/ip.h>
79 #include <linux/kernel.h>
80 #include <linux/module.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/proc_fs.h>
84 #include <linux/sched.h>
85 #include <linux/signal.h>
86 #include <linux/skbuff.h>
87 #include <linux/slab.h>
88 #include <linux/string.h>
89 #include <linux/tcp.h>
90 #include <linux/timer.h>
91 #include <linux/types.h>
93 #include "claw.h"
96 CLAW uses the s390dbf file system; see claw_trace and claw_setup
99 static char version[] __initdata = "CLAW driver";
100 static char debug_buffer[255];
102 * Debug Facility Stuff
104 static debug_info_t *claw_dbf_setup;
105 static debug_info_t *claw_dbf_trace;
108 * CLAW Debug Facility functions
110 static void
111 claw_unregister_debug_facility(void)
113 if (claw_dbf_setup)
114 debug_unregister(claw_dbf_setup);
115 if (claw_dbf_trace)
116 debug_unregister(claw_dbf_trace);
119 static int
120 claw_register_debug_facility(void)
122 claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
123 claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
124 if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
125 claw_unregister_debug_facility();
126 return -ENOMEM;
128 debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
129 debug_set_level(claw_dbf_setup, 2);
130 debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
131 debug_set_level(claw_dbf_trace, 2);
132 return 0;
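/*
 * For reference, once registered these logs are normally readable through
 * the s390dbf debugfs interface (assuming debugfs is mounted at the usual
 * location), e.g.
 *
 *   cat /sys/kernel/debug/s390dbf/claw_trace/hex_ascii
 *   cat /sys/kernel/debug/s390dbf/claw_setup/hex_ascii
 */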
135 static inline void
136 claw_set_busy(struct net_device *dev)
138 ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
139 eieio();
142 static inline void
143 claw_clear_busy(struct net_device *dev)
145 clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
146 netif_wake_queue(dev);
147 eieio();
150 static inline int
151 claw_check_busy(struct net_device *dev)
153 eieio();
154 return ((struct claw_privbk *) dev->ml_priv)->tbusy;
157 static inline void
158 claw_setbit_busy(int nr,struct net_device *dev)
160 netif_stop_queue(dev);
161 set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
164 static inline void
165 claw_clearbit_busy(int nr,struct net_device *dev)
167 clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
168 netif_wake_queue(dev);
171 static inline int
172 claw_test_and_setbit_busy(int nr,struct net_device *dev)
174 netif_stop_queue(dev);
175 return test_and_set_bit(nr,
176 (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
180 /* Functions for the DEV methods */
182 static int claw_probe(struct ccwgroup_device *cgdev);
183 static void claw_remove_device(struct ccwgroup_device *cgdev);
184 static void claw_purge_skb_queue(struct sk_buff_head *q);
185 static int claw_new_device(struct ccwgroup_device *cgdev);
186 static int claw_shutdown_device(struct ccwgroup_device *cgdev);
187 static int claw_tx(struct sk_buff *skb, struct net_device *dev);
188 static int claw_change_mtu( struct net_device *dev, int new_mtu);
189 static int claw_open(struct net_device *dev);
190 static void claw_irq_handler(struct ccw_device *cdev,
191 unsigned long intparm, struct irb *irb);
192 static void claw_irq_tasklet ( unsigned long data );
193 static int claw_release(struct net_device *dev);
194 static void claw_write_retry ( struct chbk * p_ch );
195 static void claw_write_next ( struct chbk * p_ch );
196 static void claw_timer ( struct chbk * p_ch );
198 /* Functions */
199 static int add_claw_reads(struct net_device *dev,
200 struct ccwbk* p_first, struct ccwbk* p_last);
201 static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
202 static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
203 static int find_link(struct net_device *dev, char *host_name, char *ws_name );
204 static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
205 static int init_ccw_bk(struct net_device *dev);
206 static void probe_error( struct ccwgroup_device *cgdev);
207 static struct net_device_stats *claw_stats(struct net_device *dev);
208 static int pages_to_order_of_mag(int num_of_pages);
209 static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
210 /* sysfs Functions */
211 static ssize_t claw_hname_show(struct device *dev,
212 struct device_attribute *attr, char *buf);
213 static ssize_t claw_hname_write(struct device *dev,
214 struct device_attribute *attr,
215 const char *buf, size_t count);
216 static ssize_t claw_adname_show(struct device *dev,
217 struct device_attribute *attr, char *buf);
218 static ssize_t claw_adname_write(struct device *dev,
219 struct device_attribute *attr,
220 const char *buf, size_t count);
221 static ssize_t claw_apname_show(struct device *dev,
222 struct device_attribute *attr, char *buf);
223 static ssize_t claw_apname_write(struct device *dev,
224 struct device_attribute *attr,
225 const char *buf, size_t count);
226 static ssize_t claw_wbuff_show(struct device *dev,
227 struct device_attribute *attr, char *buf);
228 static ssize_t claw_wbuff_write(struct device *dev,
229 struct device_attribute *attr,
230 const char *buf, size_t count);
231 static ssize_t claw_rbuff_show(struct device *dev,
232 struct device_attribute *attr, char *buf);
233 static ssize_t claw_rbuff_write(struct device *dev,
234 struct device_attribute *attr,
235 const char *buf, size_t count);
236 static int claw_add_files(struct device *dev);
237 static void claw_remove_files(struct device *dev);
239 /* Functions for System Validate */
240 static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
241 static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
242 __u8 correlator, __u8 rc , char *local_name, char *remote_name);
243 static int claw_snd_conn_req(struct net_device *dev, __u8 link);
244 static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
245 static int claw_snd_sys_validate_rsp(struct net_device *dev,
246 struct clawctl * p_ctl, __u32 return_code);
247 static int claw_strt_conn_req(struct net_device *dev );
248 static void claw_strt_read(struct net_device *dev, int lock);
249 static void claw_strt_out_IO(struct net_device *dev);
250 static void claw_free_wrt_buf(struct net_device *dev);
252 /* Functions for unpack reads */
253 static void unpack_read(struct net_device *dev);
255 static int claw_pm_prepare(struct ccwgroup_device *gdev)
257 return -EPERM;
260 /* the root device for claw group devices */
261 static struct device *claw_root_dev;
263 /* ccwgroup table */
265 static struct ccwgroup_driver claw_group_driver = {
266 .driver = {
267 .owner = THIS_MODULE,
268 .name = "claw",
270 .max_slaves = 2,
271 .driver_id = 0xC3D3C1E6,
272 .probe = claw_probe,
273 .remove = claw_remove_device,
274 .set_online = claw_new_device,
275 .set_offline = claw_shutdown_device,
276 .prepare = claw_pm_prepare,
279 static struct ccw_device_id claw_ids[] = {
280 {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
283 MODULE_DEVICE_TABLE(ccw, claw_ids);
285 static struct ccw_driver claw_ccw_driver = {
286 .driver = {
287 .owner = THIS_MODULE,
288 .name = "claw",
290 .ids = claw_ids,
291 .probe = ccwgroup_probe_ccwdev,
292 .remove = ccwgroup_remove_ccwdev,
293 .int_class = IOINT_CLW,
296 static ssize_t
297 claw_driver_group_store(struct device_driver *ddrv, const char *buf,
298 size_t count)
300 int err;
301 err = ccwgroup_create_from_string(claw_root_dev,
302 claw_group_driver.driver_id,
303 &claw_ccw_driver, 2, buf);
304 return err ? err : count;
307 static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
309 static struct attribute *claw_group_attrs[] = {
310 &driver_attr_group.attr,
311 NULL,
314 static struct attribute_group claw_group_attr_group = {
315 .attrs = claw_group_attrs,
318 static const struct attribute_group *claw_group_attr_groups[] = {
319 &claw_group_attr_group,
320 NULL,
324 * Key functions
327 /*----------------------------------------------------------------*
328 * claw_probe *
329 * this function is called for each CLAW device. *
330 *----------------------------------------------------------------*/
331 static int
332 claw_probe(struct ccwgroup_device *cgdev)
334 int rc;
335 struct claw_privbk *privptr=NULL;
337 CLAW_DBF_TEXT(2, setup, "probe");
338 if (!get_device(&cgdev->dev))
339 return -ENODEV;
340 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
341 dev_set_drvdata(&cgdev->dev, privptr);
342 if (privptr == NULL) {
343 probe_error(cgdev);
344 put_device(&cgdev->dev);
345 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
346 return -ENOMEM;
348 privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
349 privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
350 if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
351 probe_error(cgdev);
352 put_device(&cgdev->dev);
353 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
354 return -ENOMEM;
356 memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
357 memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
358 memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
359 privptr->p_env->packing = 0;
360 privptr->p_env->write_buffers = 5;
361 privptr->p_env->read_buffers = 5;
362 privptr->p_env->read_size = CLAW_FRAME_SIZE;
363 privptr->p_env->write_size = CLAW_FRAME_SIZE;
364 rc = claw_add_files(&cgdev->dev);
365 if (rc) {
366 probe_error(cgdev);
367 put_device(&cgdev->dev);
368 dev_err(&cgdev->dev, "Creating the /proc files for a new"
369 " CLAW device failed\n");
370 CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
371 return rc;
373 privptr->p_env->p_priv = privptr;
374 cgdev->cdev[0]->handler = claw_irq_handler;
375 cgdev->cdev[1]->handler = claw_irq_handler;
376 CLAW_DBF_TEXT(2, setup, "prbext 0");
378 return 0;
379 } /* end of claw_probe */
381 /*-------------------------------------------------------------------*
382 * claw_tx *
383 *-------------------------------------------------------------------*/
385 static int
386 claw_tx(struct sk_buff *skb, struct net_device *dev)
388 int rc;
389 struct claw_privbk *privptr = dev->ml_priv;
390 unsigned long saveflags;
391 struct chbk *p_ch;
393 CLAW_DBF_TEXT(4, trace, "claw_tx");
394 p_ch = &privptr->channel[WRITE_CHANNEL];
395 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
396 rc=claw_hw_tx( skb, dev, 1 );
397 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
398 CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
399 if (rc)
400 rc = NETDEV_TX_BUSY;
401 else
402 rc = NETDEV_TX_OK;
403 return rc;
404 } /* end of claw_tx */
406 /*------------------------------------------------------------------*
407 * pack the collect queue into an skb and return it *
408 * If not packing just return the top skb from the queue *
409 *------------------------------------------------------------------*/
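/*
 * As suggested by the clawph handling in claw_hw_tx() below, each packed
 * sub-frame appears to be a small clawph header (len, link_num, flag)
 * followed by the payload, padded out to a 4-byte multiple, and
 * skb->cb[1] == 'P' marks an skb that already carries such a header.
 * See claw.h for the authoritative layout.
 */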
411 static struct sk_buff *
412 claw_pack_skb(struct claw_privbk *privptr)
414 struct sk_buff *new_skb,*held_skb;
415 struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
416 struct claw_env *p_env = privptr->p_env;
417 int pkt_cnt,pk_ind,so_far;
419 new_skb = NULL; /* assume no dice */
420 pkt_cnt = 0;
421 CLAW_DBF_TEXT(4, trace, "PackSKBe");
422 if (!skb_queue_empty(&p_ch->collect_queue)) {
423 /* some data */
424 held_skb = skb_dequeue(&p_ch->collect_queue);
425 if (held_skb)
426 dev_kfree_skb_any(held_skb);
427 else
428 return NULL;
429 if (p_env->packing != DO_PACKED)
430 return held_skb;
431 /* get a new SKB we will pack at least one */
432 new_skb = dev_alloc_skb(p_env->write_size);
433 if (new_skb == NULL) {
434 atomic_inc(&held_skb->users);
435 skb_queue_head(&p_ch->collect_queue,held_skb);
436 return NULL;
438 /* we have packed packet and a place to put it */
439 pk_ind = 1;
440 so_far = 0;
441 new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
442 while ((pk_ind) && (held_skb != NULL)) {
443 if (held_skb->len+so_far <= p_env->write_size-8) {
444 memcpy(skb_put(new_skb,held_skb->len),
445 held_skb->data,held_skb->len);
446 privptr->stats.tx_packets++;
447 so_far += held_skb->len;
448 pkt_cnt++;
449 dev_kfree_skb_any(held_skb);
450 held_skb = skb_dequeue(&p_ch->collect_queue);
451 if (held_skb)
452 atomic_dec(&held_skb->users);
453 } else {
454 pk_ind = 0;
455 atomic_inc(&held_skb->users);
456 skb_queue_head(&p_ch->collect_queue,held_skb);
460 CLAW_DBF_TEXT(4, trace, "PackSKBx");
461 return new_skb;
464 /*-------------------------------------------------------------------*
465 * claw_change_mtu *
467 *-------------------------------------------------------------------*/
469 static int
470 claw_change_mtu(struct net_device *dev, int new_mtu)
472 struct claw_privbk *privptr = dev->ml_priv;
473 int buff_size;
474 CLAW_DBF_TEXT(4, trace, "setmtu");
475 buff_size = privptr->p_env->write_size;
476 if ((new_mtu < 60) || (new_mtu > buff_size)) {
477 return -EINVAL;
479 dev->mtu = new_mtu;
480 return 0;
481 } /* end of claw_change_mtu */
484 /*-------------------------------------------------------------------*
485 * claw_open *
487 *-------------------------------------------------------------------*/
488 static int
489 claw_open(struct net_device *dev)
492 int rc;
493 int i;
494 unsigned long saveflags=0;
495 unsigned long parm;
496 struct claw_privbk *privptr;
497 DECLARE_WAITQUEUE(wait, current);
498 struct timer_list timer;
499 struct ccwbk *p_buf;
501 CLAW_DBF_TEXT(4, trace, "open");
502 privptr = (struct claw_privbk *)dev->ml_priv;
503 /* allocate and initialize CCW blocks */
504 if (privptr->buffs_alloc == 0) {
505 rc=init_ccw_bk(dev);
506 if (rc) {
507 CLAW_DBF_TEXT(2, trace, "openmem");
508 return -ENOMEM;
511 privptr->system_validate_comp=0;
512 privptr->release_pend=0;
513 if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
514 privptr->p_env->read_size=DEF_PACK_BUFSIZE;
515 privptr->p_env->write_size=DEF_PACK_BUFSIZE;
516 privptr->p_env->packing=PACKING_ASK;
517 } else {
518 privptr->p_env->packing=0;
519 privptr->p_env->read_size=CLAW_FRAME_SIZE;
520 privptr->p_env->write_size=CLAW_FRAME_SIZE;
522 claw_set_busy(dev);
523 tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
524 (unsigned long) &privptr->channel[READ_CHANNEL]);
525 for ( i = 0; i < 2; i++) {
526 CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
527 init_waitqueue_head(&privptr->channel[i].wait);
528 /* skb_queue_head_init(&p_ch->io_queue); */
529 if (i == WRITE_CHANNEL)
530 skb_queue_head_init(
531 &privptr->channel[WRITE_CHANNEL].collect_queue);
532 privptr->channel[i].flag_a = 0;
533 privptr->channel[i].IO_active = 0;
534 privptr->channel[i].flag &= ~CLAW_TIMER;
535 init_timer(&timer);
536 timer.function = (void *)claw_timer;
537 timer.data = (unsigned long)(&privptr->channel[i]);
538 timer.expires = jiffies + 15*HZ;
539 add_timer(&timer);
540 spin_lock_irqsave(get_ccwdev_lock(
541 privptr->channel[i].cdev), saveflags);
542 parm = (unsigned long) &privptr->channel[i];
543 privptr->channel[i].claw_state = CLAW_START_HALT_IO;
544 rc = 0;
545 add_wait_queue(&privptr->channel[i].wait, &wait);
546 rc = ccw_device_halt(
547 (struct ccw_device *)privptr->channel[i].cdev,parm);
548 set_current_state(TASK_INTERRUPTIBLE);
549 spin_unlock_irqrestore(
550 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
551 schedule();
552 set_current_state(TASK_RUNNING);
553 remove_wait_queue(&privptr->channel[i].wait, &wait);
554 if(rc != 0)
555 ccw_check_return_code(privptr->channel[i].cdev, rc);
556 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
557 del_timer(&timer);
559 if ((((privptr->channel[READ_CHANNEL].last_dstat |
560 privptr->channel[WRITE_CHANNEL].last_dstat) &
561 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
562 (((privptr->channel[READ_CHANNEL].flag |
563 privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
564 dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
565 "%s: remote side is not ready\n", dev->name);
566 CLAW_DBF_TEXT(2, trace, "notrdy");
568 for ( i = 0; i < 2; i++) {
569 spin_lock_irqsave(
570 get_ccwdev_lock(privptr->channel[i].cdev),
571 saveflags);
572 parm = (unsigned long) &privptr->channel[i];
573 privptr->channel[i].claw_state = CLAW_STOP;
574 rc = ccw_device_halt(
575 (struct ccw_device *)&privptr->channel[i].cdev,
576 parm);
577 spin_unlock_irqrestore(
578 get_ccwdev_lock(privptr->channel[i].cdev),
579 saveflags);
580 if (rc != 0) {
581 ccw_check_return_code(
582 privptr->channel[i].cdev, rc);
585 free_pages((unsigned long)privptr->p_buff_ccw,
586 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
587 if (privptr->p_env->read_size < PAGE_SIZE) {
588 free_pages((unsigned long)privptr->p_buff_read,
589 (int)pages_to_order_of_mag(
590 privptr->p_buff_read_num));
592 else {
593 p_buf=privptr->p_read_active_first;
594 while (p_buf!=NULL) {
595 free_pages((unsigned long)p_buf->p_buffer,
596 (int)pages_to_order_of_mag(
597 privptr->p_buff_pages_perread ));
598 p_buf=p_buf->next;
601 if (privptr->p_env->write_size < PAGE_SIZE ) {
602 free_pages((unsigned long)privptr->p_buff_write,
603 (int)pages_to_order_of_mag(
604 privptr->p_buff_write_num));
606 else {
607 p_buf=privptr->p_write_active_first;
608 while (p_buf!=NULL) {
609 free_pages((unsigned long)p_buf->p_buffer,
610 (int)pages_to_order_of_mag(
611 privptr->p_buff_pages_perwrite ));
612 p_buf=p_buf->next;
615 privptr->buffs_alloc = 0;
616 privptr->channel[READ_CHANNEL].flag = 0x00;
617 privptr->channel[WRITE_CHANNEL].flag = 0x00;
618 privptr->p_buff_ccw=NULL;
619 privptr->p_buff_read=NULL;
620 privptr->p_buff_write=NULL;
621 claw_clear_busy(dev);
622 CLAW_DBF_TEXT(2, trace, "open EIO");
623 return -EIO;
626 /* Send SystemValidate command */
628 claw_clear_busy(dev);
629 CLAW_DBF_TEXT(4, trace, "openok");
630 return 0;
631 } /* end of claw_open */
633 /*-------------------------------------------------------------------*
635 * claw_irq_handler *
637 *--------------------------------------------------------------------*/
638 static void
639 claw_irq_handler(struct ccw_device *cdev,
640 unsigned long intparm, struct irb *irb)
642 struct chbk *p_ch = NULL;
643 struct claw_privbk *privptr = NULL;
644 struct net_device *dev = NULL;
645 struct claw_env *p_env;
646 struct chbk *p_ch_r=NULL;
648 CLAW_DBF_TEXT(4, trace, "clawirq");
649 /* Bypass all 'unsolicited interrupts' */
650 privptr = dev_get_drvdata(&cdev->dev);
651 if (!privptr) {
652 dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
653 " IRQ, c-%02x d-%02x\n",
654 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
655 CLAW_DBF_TEXT(2, trace, "badirq");
656 return;
659 /* Try to extract channel from driver data. */
660 if (privptr->channel[READ_CHANNEL].cdev == cdev)
661 p_ch = &privptr->channel[READ_CHANNEL];
662 else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
663 p_ch = &privptr->channel[WRITE_CHANNEL];
664 else {
665 dev_warn(&cdev->dev, "The device is not a CLAW device\n");
666 CLAW_DBF_TEXT(2, trace, "badchan");
667 return;
669 CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);
671 dev = (struct net_device *) (p_ch->ndev);
672 p_env=privptr->p_env;
674 /* Copy interruption response block. */
675 memcpy(p_ch->irb, irb, sizeof(struct irb));
677 /* Check for good subchannel return code, otherwise info message */
678 if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
679 dev_info(&cdev->dev,
680 "%s: subchannel check for device: %04x -"
681 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
682 dev->name, p_ch->devno,
683 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
684 irb->scsw.cmd.cpa);
685 CLAW_DBF_TEXT(2, trace, "chanchk");
686 /* return; */
689 /* Check the reason-code of a unit check */
690 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
691 ccw_check_unit_check(p_ch, irb->ecw[0]);
693 /* State machine to bring the connection up, down and to restart */
694 p_ch->last_dstat = irb->scsw.cmd.dstat;
696 switch (p_ch->claw_state) {
697 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
698 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
699 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
700 (p_ch->irb->scsw.cmd.stctl ==
701 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
702 return;
703 wake_up(&p_ch->wait); /* wake up claw_release */
704 CLAW_DBF_TEXT(4, trace, "stop");
705 return;
706 case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
707 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
708 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
709 (p_ch->irb->scsw.cmd.stctl ==
710 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
711 CLAW_DBF_TEXT(4, trace, "haltio");
712 return;
714 if (p_ch->flag == CLAW_READ) {
715 p_ch->claw_state = CLAW_START_READ;
716 wake_up(&p_ch->wait); /* wake claw_open (READ)*/
717 } else if (p_ch->flag == CLAW_WRITE) {
718 p_ch->claw_state = CLAW_START_WRITE;
719 /* send SYSTEM_VALIDATE */
720 claw_strt_read(dev, LOCK_NO);
721 claw_send_control(dev,
722 SYSTEM_VALIDATE_REQUEST,
723 0, 0, 0,
724 p_env->host_name,
725 p_env->adapter_name);
726 } else {
727 dev_warn(&cdev->dev, "The CLAW device received"
728 " an unexpected IRQ, "
729 "c-%02x d-%02x\n",
730 irb->scsw.cmd.cstat,
731 irb->scsw.cmd.dstat);
732 return;
734 CLAW_DBF_TEXT(4, trace, "haltio");
735 return;
736 case CLAW_START_READ:
737 CLAW_DBF_TEXT(4, trace, "ReadIRQ");
738 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
739 clear_bit(0, (void *)&p_ch->IO_active);
740 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
741 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
742 (p_ch->irb->ecw[0]) == 0) {
743 privptr->stats.rx_errors++;
744 dev_info(&cdev->dev,
745 "%s: Restart is required after remote "
746 "side recovers \n",
747 dev->name);
749 CLAW_DBF_TEXT(4, trace, "notrdy");
750 return;
752 if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
753 (p_ch->irb->scsw.cmd.dstat == 0)) {
754 if (test_and_set_bit(CLAW_BH_ACTIVE,
755 (void *)&p_ch->flag_a) == 0)
756 tasklet_schedule(&p_ch->tasklet);
757 else
758 CLAW_DBF_TEXT(4, trace, "PCINoBH");
759 CLAW_DBF_TEXT(4, trace, "PCI_read");
760 return;
762 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
763 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
764 (p_ch->irb->scsw.cmd.stctl ==
765 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
766 CLAW_DBF_TEXT(4, trace, "SPend_rd");
767 return;
769 clear_bit(0, (void *)&p_ch->IO_active);
770 claw_clearbit_busy(TB_RETRY, dev);
771 if (test_and_set_bit(CLAW_BH_ACTIVE,
772 (void *)&p_ch->flag_a) == 0)
773 tasklet_schedule(&p_ch->tasklet);
774 else
775 CLAW_DBF_TEXT(4, trace, "RdBHAct");
776 CLAW_DBF_TEXT(4, trace, "RdIRQXit");
777 return;
778 case CLAW_START_WRITE:
779 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
780 dev_info(&cdev->dev,
781 "%s: Unit Check Occurred in "
782 "write channel\n", dev->name);
783 clear_bit(0, (void *)&p_ch->IO_active);
784 if (p_ch->irb->ecw[0] & 0x80) {
785 dev_info(&cdev->dev,
786 "%s: Resetting Event "
787 "occurred:\n", dev->name);
788 init_timer(&p_ch->timer);
789 p_ch->timer.function =
790 (void *)claw_write_retry;
791 p_ch->timer.data = (unsigned long)p_ch;
792 p_ch->timer.expires = jiffies + 10*HZ;
793 add_timer(&p_ch->timer);
794 dev_info(&cdev->dev,
795 "%s: write connection "
796 "restarting\n", dev->name);
798 CLAW_DBF_TEXT(4, trace, "rstrtwrt");
799 return;
801 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
802 clear_bit(0, (void *)&p_ch->IO_active);
803 dev_info(&cdev->dev,
804 "%s: Unit Exception "
805 "occurred in write channel\n",
806 dev->name);
808 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
809 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
810 (p_ch->irb->scsw.cmd.stctl ==
811 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
812 CLAW_DBF_TEXT(4, trace, "writeUE");
813 return;
815 clear_bit(0, (void *)&p_ch->IO_active);
816 if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
817 claw_write_next(p_ch);
818 claw_clearbit_busy(TB_TX, dev);
819 claw_clear_busy(dev);
821 p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
822 if (test_and_set_bit(CLAW_BH_ACTIVE,
823 (void *)&p_ch_r->flag_a) == 0)
824 tasklet_schedule(&p_ch_r->tasklet);
825 CLAW_DBF_TEXT(4, trace, "StWtExit");
826 return;
827 default:
828 dev_warn(&cdev->dev,
829 "The CLAW device for %s received an unexpected IRQ\n",
830 dev->name);
831 CLAW_DBF_TEXT(2, trace, "badIRQ");
832 return;
835 } /* end of claw_irq_handler */
838 /*-------------------------------------------------------------------*
839 * claw_irq_tasklet *
841 *--------------------------------------------------------------------*/
842 static void
843 claw_irq_tasklet ( unsigned long data )
845 struct chbk * p_ch;
846 struct net_device *dev;
848 p_ch = (struct chbk *) data;
849 dev = (struct net_device *)p_ch->ndev;
850 CLAW_DBF_TEXT(4, trace, "IRQtask");
851 unpack_read(dev);
852 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
853 CLAW_DBF_TEXT(4, trace, "TskletXt");
854 return;
855 } /* end of claw_irq_tasklet */
857 /*-------------------------------------------------------------------*
858 * claw_release *
860 *--------------------------------------------------------------------*/
861 static int
862 claw_release(struct net_device *dev)
864 int rc;
865 int i;
866 unsigned long saveflags;
867 unsigned long parm;
868 struct claw_privbk *privptr;
869 DECLARE_WAITQUEUE(wait, current);
870 struct ccwbk* p_this_ccw;
871 struct ccwbk* p_buf;
873 if (!dev)
874 return 0;
875 privptr = (struct claw_privbk *)dev->ml_priv;
876 if (!privptr)
877 return 0;
878 CLAW_DBF_TEXT(4, trace, "release");
879 privptr->release_pend=1;
880 claw_setbit_busy(TB_STOP,dev);
881 for ( i = 1; i >=0 ; i--) {
882 spin_lock_irqsave(
883 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
884 /* del_timer(&privptr->channel[READ_CHANNEL].timer); */
885 privptr->channel[i].claw_state = CLAW_STOP;
886 privptr->channel[i].IO_active = 0;
887 parm = (unsigned long) &privptr->channel[i];
888 if (i == WRITE_CHANNEL)
889 claw_purge_skb_queue(
890 &privptr->channel[WRITE_CHANNEL].collect_queue);
891 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
892 if (privptr->system_validate_comp==0x00) /* never opened? */
893 init_waitqueue_head(&privptr->channel[i].wait);
894 add_wait_queue(&privptr->channel[i].wait, &wait);
895 set_current_state(TASK_INTERRUPTIBLE);
896 spin_unlock_irqrestore(
897 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
898 schedule();
899 set_current_state(TASK_RUNNING);
900 remove_wait_queue(&privptr->channel[i].wait, &wait);
901 if (rc != 0) {
902 ccw_check_return_code(privptr->channel[i].cdev, rc);
905 if (privptr->pk_skb != NULL) {
906 dev_kfree_skb_any(privptr->pk_skb);
907 privptr->pk_skb = NULL;
909 if(privptr->buffs_alloc != 1) {
910 CLAW_DBF_TEXT(4, trace, "none2fre");
911 return 0;
913 CLAW_DBF_TEXT(4, trace, "freebufs");
914 if (privptr->p_buff_ccw != NULL) {
915 free_pages((unsigned long)privptr->p_buff_ccw,
916 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
918 CLAW_DBF_TEXT(4, trace, "freeread");
919 if (privptr->p_env->read_size < PAGE_SIZE) {
920 if (privptr->p_buff_read != NULL) {
921 free_pages((unsigned long)privptr->p_buff_read,
922 (int)pages_to_order_of_mag(privptr->p_buff_read_num));
925 else {
926 p_buf=privptr->p_read_active_first;
927 while (p_buf!=NULL) {
928 free_pages((unsigned long)p_buf->p_buffer,
929 (int)pages_to_order_of_mag(
930 privptr->p_buff_pages_perread ));
931 p_buf=p_buf->next;
934 CLAW_DBF_TEXT(4, trace, "freewrit");
935 if (privptr->p_env->write_size < PAGE_SIZE ) {
936 free_pages((unsigned long)privptr->p_buff_write,
937 (int)pages_to_order_of_mag(privptr->p_buff_write_num));
939 else {
940 p_buf=privptr->p_write_active_first;
941 while (p_buf!=NULL) {
942 free_pages((unsigned long)p_buf->p_buffer,
943 (int)pages_to_order_of_mag(
944 privptr->p_buff_pages_perwrite ));
945 p_buf=p_buf->next;
948 CLAW_DBF_TEXT(4, trace, "clearptr");
949 privptr->buffs_alloc = 0;
950 privptr->p_buff_ccw=NULL;
951 privptr->p_buff_read=NULL;
952 privptr->p_buff_write=NULL;
953 privptr->system_validate_comp=0;
954 privptr->release_pend=0;
955 /* Remove any writes that were pending and reset all reads */
956 p_this_ccw=privptr->p_read_active_first;
957 while (p_this_ccw!=NULL) {
958 p_this_ccw->header.length=0xffff;
959 p_this_ccw->header.opcode=0xff;
960 p_this_ccw->header.flag=0x00;
961 p_this_ccw=p_this_ccw->next;
964 while (privptr->p_write_active_first!=NULL) {
965 p_this_ccw=privptr->p_write_active_first;
966 p_this_ccw->header.flag=CLAW_PENDING;
967 privptr->p_write_active_first=p_this_ccw->next;
968 p_this_ccw->next=privptr->p_write_free_chain;
969 privptr->p_write_free_chain=p_this_ccw;
970 ++privptr->write_free_count;
972 privptr->p_write_active_last=NULL;
973 privptr->mtc_logical_link = -1;
974 privptr->mtc_skipping = 1;
975 privptr->mtc_offset=0;
977 if (((privptr->channel[READ_CHANNEL].last_dstat |
978 privptr->channel[WRITE_CHANNEL].last_dstat) &
979 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
980 dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
981 "Deactivating %s completed with incorrect"
982 " subchannel status "
983 "(read %02x, write %02x)\n",
984 dev->name,
985 privptr->channel[READ_CHANNEL].last_dstat,
986 privptr->channel[WRITE_CHANNEL].last_dstat);
987 CLAW_DBF_TEXT(2, trace, "badclose");
989 CLAW_DBF_TEXT(4, trace, "rlsexit");
990 return 0;
991 } /* end of claw_release */
993 /*-------------------------------------------------------------------*
994 * claw_write_retry *
996 *--------------------------------------------------------------------*/
998 static void
999 claw_write_retry ( struct chbk *p_ch )
1002 struct net_device *dev=p_ch->ndev;
1004 CLAW_DBF_TEXT(4, trace, "w_retry");
1005 if (p_ch->claw_state == CLAW_STOP) {
1006 return;
1008 claw_strt_out_IO( dev );
1009 CLAW_DBF_TEXT(4, trace, "rtry_xit");
1010 return;
1011 } /* end of claw_write_retry */
1014 /*-------------------------------------------------------------------*
1015 * claw_write_next *
1017 *--------------------------------------------------------------------*/
1019 static void
1020 claw_write_next ( struct chbk * p_ch )
1023 struct net_device *dev;
1024 struct claw_privbk *privptr=NULL;
1025 struct sk_buff *pk_skb;
1027 CLAW_DBF_TEXT(4, trace, "claw_wrt");
1028 if (p_ch->claw_state == CLAW_STOP)
1029 return;
1030 dev = (struct net_device *) p_ch->ndev;
1031 privptr = (struct claw_privbk *) dev->ml_priv;
1032 claw_free_wrt_buf( dev );
1033 if ((privptr->write_free_count > 0) &&
1034 !skb_queue_empty(&p_ch->collect_queue)) {
1035 pk_skb = claw_pack_skb(privptr);
1036 while (pk_skb != NULL) {
1037 claw_hw_tx(pk_skb, dev, 1);
1038 if (privptr->write_free_count > 0) {
1039 pk_skb = claw_pack_skb(privptr);
1040 } else
1041 pk_skb = NULL;
1044 if (privptr->p_write_active_first!=NULL) {
1045 claw_strt_out_IO(dev);
1047 return;
1048 } /* end of claw_write_next */
1050 /*-------------------------------------------------------------------*
1052 * claw_timer *
1053 *--------------------------------------------------------------------*/
1055 static void
1056 claw_timer ( struct chbk * p_ch )
1058 CLAW_DBF_TEXT(4, trace, "timer");
1059 p_ch->flag |= CLAW_TIMER;
1060 wake_up(&p_ch->wait);
1061 return;
1062 } /* end of claw_timer */
1066 * functions
1070 /*-------------------------------------------------------------------*
1072 * pages_to_order_of_mag *
1074 * takes a number of pages from 1 to 512 and returns the *
1075 * ceiling of log(num_pages)/log(2); get_free_pages() needs a *
1076 * base 2 order of magnitude and has an upper order of 9 *
1077 *--------------------------------------------------------------------*/
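/*
 * For example, the result is the ceiling of log2(num_of_pages), clamped
 * at 9:
 *
 *   pages_to_order_of_mag(1)   -> 0   (2^0 = 1 page)
 *   pages_to_order_of_mag(5)   -> 3   (2^3 = 8 pages)
 *   pages_to_order_of_mag(512) -> 9   (2^9 = 512 pages)
 */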
1079 static int
1080 pages_to_order_of_mag(int num_of_pages)
1082 int order_of_mag=1; /* assume 2 pages */
1083 int nump;
1085 CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
1086 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
1087 /* 512 pages = 2Meg on 4k page systems */
1088 if (num_of_pages >= 512) {return 9; }
1089 /* we have two or more pages order is at least 1 */
1090 for (nump=2 ;nump <= 512;nump*=2) {
1091 if (num_of_pages <= nump)
1092 break;
1093 order_of_mag +=1;
1095 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
1096 CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
1097 return order_of_mag;
1100 /*-------------------------------------------------------------------*
1102 * add_claw_reads *
1104 *--------------------------------------------------------------------*/
1105 static int
1106 add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1107 struct ccwbk* p_last)
1109 struct claw_privbk *privptr;
1110 struct ccw1 temp_ccw;
1111 struct endccw * p_end;
1112 CLAW_DBF_TEXT(4, trace, "addreads");
1113 privptr = dev->ml_priv;
1114 p_end = privptr->p_end_ccw;
1116 /* first CCW and last CCW contain a new set of read channel programs
1117 * to append to the running channel programs
1119 if ( p_first==NULL) {
1120 CLAW_DBF_TEXT(4, trace, "addexit");
1121 return 0;
1124 /* set up ending CCW sequence for this segment */
1125 if (p_end->read1) {
1126 p_end->read1=0x00; /* second ending CCW is now active */
1127 /* reset ending CCWs and setup TIC CCWs */
1128 p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1129 p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1130 p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
1131 p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
1132 p_end->read2_nop2.cda=0;
1133 p_end->read2_nop2.count=1;
1135 else {
1136 p_end->read1=0x01; /* first ending CCW is now active */
1137 /* reset ending CCWs and setup TIC CCWs */
1138 p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1139 p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1140 p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
1141 p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
1142 p_end->read1_nop2.cda=0;
1143 p_end->read1_nop2.count=1;
1146 if ( privptr-> p_read_active_first ==NULL ) {
1147 privptr->p_read_active_first = p_first; /* set new first */
1148 privptr->p_read_active_last = p_last; /* set new last */
1150 else {
1152 /* set up TIC ccw */
1153 temp_ccw.cda= (__u32)__pa(&p_first->read);
1154 temp_ccw.count=0;
1155 temp_ccw.flags=0;
1156 temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
1159 if (p_end->read1) {
1161 /* first set of CCW's is chained to the new read */
1162 /* chain, so the second set is chained to the active chain. */
1163 /* Therefore modify the second set to point to the new */
1164 /* read chain set up TIC CCWs */
1165 /* make sure we update the CCW so channel doesn't fetch it */
1166 /* when it's only half done */
1167 memcpy( &p_end->read2_nop2, &temp_ccw ,
1168 sizeof(struct ccw1));
1169 privptr->p_read_active_last->r_TIC_1.cda=
1170 (__u32)__pa(&p_first->read);
1171 privptr->p_read_active_last->r_TIC_2.cda=
1172 (__u32)__pa(&p_first->read);
1174 else {
1175 /* make sure we update the CCW so channel doesn't */
1176 /* fetch it when it is only half done */
1177 memcpy( &p_end->read1_nop2, &temp_ccw ,
1178 sizeof(struct ccw1));
1179 privptr->p_read_active_last->r_TIC_1.cda=
1180 (__u32)__pa(&p_first->read);
1181 privptr->p_read_active_last->r_TIC_2.cda=
1182 (__u32)__pa(&p_first->read);
1184 /* chain in new set of blocks */
1185 privptr->p_read_active_last->next = p_first;
1186 privptr->p_read_active_last=p_last;
1187 } /* end of if ( privptr-> p_read_active_first ==NULL) */
1188 CLAW_DBF_TEXT(4, trace, "addexit");
1189 return 0;
1190 } /* end of add_claw_reads */
1192 /*-------------------------------------------------------------------*
1193 * ccw_check_return_code *
1195 *-------------------------------------------------------------------*/
1197 static void
1198 ccw_check_return_code(struct ccw_device *cdev, int return_code)
1200 CLAW_DBF_TEXT(4, trace, "ccwret");
1201 if (return_code != 0) {
1202 switch (return_code) {
1203 case -EBUSY: /* BUSY is a transient state no action needed */
1204 break;
1205 case -ENODEV:
1206 dev_err(&cdev->dev, "The remote channel adapter is not"
1207 " available\n");
1208 break;
1209 case -EINVAL:
1210 dev_err(&cdev->dev,
1211 "The status of the remote channel adapter"
1212 " is not valid\n");
1213 break;
1214 default:
1215 dev_err(&cdev->dev, "The common device layer"
1216 " returned error code %d\n",
1217 return_code);
1220 CLAW_DBF_TEXT(4, trace, "ccwret");
1221 } /* end of ccw_check_return_code */
1223 /*-------------------------------------------------------------------*
1224 * ccw_check_unit_check *
1225 *--------------------------------------------------------------------*/
1227 static void
1228 ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1230 struct net_device *ndev = p_ch->ndev;
1231 struct device *dev = &p_ch->cdev->dev;
1233 CLAW_DBF_TEXT(4, trace, "unitchek");
1234 dev_warn(dev, "The communication peer of %s disconnected\n",
1235 ndev->name);
1237 if (sense & 0x40) {
1238 if (sense & 0x01) {
1239 dev_warn(dev, "The remote channel adapter for"
1240 " %s has been reset\n",
1241 ndev->name);
1243 } else if (sense & 0x20) {
1244 if (sense & 0x04) {
1245 dev_warn(dev, "A data streaming timeout occurred"
1246 " for %s\n",
1247 ndev->name);
1248 } else if (sense & 0x10) {
1249 dev_warn(dev, "The remote channel adapter for %s"
1250 " is faulty\n",
1251 ndev->name);
1252 } else {
1253 dev_warn(dev, "A data transfer parity error occurred"
1254 " for %s\n",
1255 ndev->name);
1257 } else if (sense & 0x10) {
1258 dev_warn(dev, "A read data parity error occurred"
1259 " for %s\n",
1260 ndev->name);
1263 } /* end of ccw_check_unit_check */
1265 /*-------------------------------------------------------------------*
1266 * find_link *
1267 *--------------------------------------------------------------------*/
1268 static int
1269 find_link(struct net_device *dev, char *host_name, char *ws_name )
1271 struct claw_privbk *privptr;
1272 struct claw_env *p_env;
1273 int rc=0;
1275 CLAW_DBF_TEXT(2, setup, "findlink");
1276 privptr = dev->ml_priv;
1277 p_env=privptr->p_env;
1278 switch (p_env->packing)
1280 case PACKING_ASK:
1281 if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
1282 (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
1283 rc = EINVAL;
1284 break;
1285 case DO_PACKED:
1286 case PACK_SEND:
1287 if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
1288 (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
1289 rc = EINVAL;
1290 break;
1291 default:
1292 if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
1293 (memcmp(p_env->api_type , ws_name, 8)!=0))
1294 rc = EINVAL;
1295 break;
1298 return rc;
1299 } /* end of find_link */
1301 /*-------------------------------------------------------------------*
1302 * claw_hw_tx *
1305 *-------------------------------------------------------------------*/
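/*
 * For example, with the default 4k frame size an skb of 14336 bytes needs
 * DIV_ROUND_UP(14336, 4096) = 4 write buffers before it can be copied out.
 */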
1307 static int
1308 claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1310 int rc=0;
1311 struct claw_privbk *privptr;
1312 struct ccwbk *p_this_ccw;
1313 struct ccwbk *p_first_ccw;
1314 struct ccwbk *p_last_ccw;
1315 __u32 numBuffers;
1316 signed long len_of_data;
1317 unsigned long bytesInThisBuffer;
1318 unsigned char *pDataAddress;
1319 struct endccw *pEnd;
1320 struct ccw1 tempCCW;
1321 struct claw_env *p_env;
1322 struct clawph *pk_head;
1323 struct chbk *ch;
1325 CLAW_DBF_TEXT(4, trace, "hw_tx");
1326 privptr = (struct claw_privbk *)(dev->ml_priv);
1327 p_env =privptr->p_env;
1328 claw_free_wrt_buf(dev); /* Clean up free chain if possible */
1329 /* scan the write queue to free any completed write packets */
1330 p_first_ccw=NULL;
1331 p_last_ccw=NULL;
1332 if ((p_env->packing >= PACK_SEND) &&
1333 (skb->cb[1] != 'P')) {
1334 skb_push(skb,sizeof(struct clawph));
1335 pk_head=(struct clawph *)skb->data;
1336 pk_head->len=skb->len-sizeof(struct clawph);
1337 if (pk_head->len%4) {
1338 pk_head->len+= 4-(pk_head->len%4);
1339 skb_pad(skb,4-(pk_head->len%4));
1340 skb_put(skb,4-(pk_head->len%4));
1342 if (p_env->packing == DO_PACKED)
1343 pk_head->link_num = linkid;
1344 else
1345 pk_head->link_num = 0;
1346 pk_head->flag = 0x00;
1347 skb_pad(skb,4);
1348 skb->cb[1] = 'P';
1350 if (linkid == 0) {
1351 if (claw_check_busy(dev)) {
1352 if (privptr->write_free_count!=0) {
1353 claw_clear_busy(dev);
1355 else {
1356 claw_strt_out_IO(dev );
1357 claw_free_wrt_buf( dev );
1358 if (privptr->write_free_count==0) {
1359 ch = &privptr->channel[WRITE_CHANNEL];
1360 atomic_inc(&skb->users);
1361 skb_queue_tail(&ch->collect_queue, skb);
1362 goto Done;
1364 else {
1365 claw_clear_busy(dev);
1369 /* tx lock */
1370 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1371 ch = &privptr->channel[WRITE_CHANNEL];
1372 atomic_inc(&skb->users);
1373 skb_queue_tail(&ch->collect_queue, skb);
1374 claw_strt_out_IO(dev );
1375 rc=-EBUSY;
1376 goto Done2;
1379 /* See how many write buffers are required to hold this data */
1380 numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
1382 /* If that number of buffers isn't available, give up for now */
1383 if (privptr->write_free_count < numBuffers ||
1384 privptr->p_write_free_chain == NULL ) {
1386 claw_setbit_busy(TB_NOBUFFER,dev);
1387 ch = &privptr->channel[WRITE_CHANNEL];
1388 atomic_inc(&skb->users);
1389 skb_queue_tail(&ch->collect_queue, skb);
1390 CLAW_DBF_TEXT(2, trace, "clawbusy");
1391 goto Done2;
1393 pDataAddress=skb->data;
1394 len_of_data=skb->len;
1396 while (len_of_data > 0) {
1397 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1398 if (p_this_ccw == NULL) { /* lost the race */
1399 ch = &privptr->channel[WRITE_CHANNEL];
1400 atomic_inc(&skb->users);
1401 skb_queue_tail(&ch->collect_queue, skb);
1402 goto Done2;
1404 privptr->p_write_free_chain=p_this_ccw->next;
1405 p_this_ccw->next=NULL;
1406 --privptr->write_free_count; /* -1 */
1407 if (len_of_data >= privptr->p_env->write_size)
1408 bytesInThisBuffer = privptr->p_env->write_size;
1409 else
1410 bytesInThisBuffer = len_of_data;
1411 memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
1412 len_of_data-=bytesInThisBuffer;
1413 pDataAddress+=(unsigned long)bytesInThisBuffer;
1414 /* setup write CCW */
1415 p_this_ccw->write.cmd_code = (linkid * 8) +1;
1416 if (len_of_data>0) {
1417 p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
1419 p_this_ccw->write.count=bytesInThisBuffer;
1420 /* now add to end of this chain */
1421 if (p_first_ccw==NULL) {
1422 p_first_ccw=p_this_ccw;
1424 if (p_last_ccw!=NULL) {
1425 p_last_ccw->next=p_this_ccw;
1426 /* set up TIC ccws */
1427 p_last_ccw->w_TIC_1.cda=
1428 (__u32)__pa(&p_this_ccw->write);
1430 p_last_ccw=p_this_ccw; /* save new last block */
1433 /* FirstCCW and LastCCW now contain a new set of write channel
1434 * programs to append to the running channel program
1437 if (p_first_ccw!=NULL) {
1438 /* setup ending ccw sequence for this segment */
1439 pEnd=privptr->p_end_ccw;
1440 if (pEnd->write1) {
1441 pEnd->write1=0x00; /* second end ccw is now active */
1442 /* set up Tic CCWs */
1443 p_last_ccw->w_TIC_1.cda=
1444 (__u32)__pa(&pEnd->write2_nop1);
1445 pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1446 pEnd->write2_nop2.flags =
1447 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1448 pEnd->write2_nop2.cda=0;
1449 pEnd->write2_nop2.count=1;
1451 else { /* end of if (pEnd->write1)*/
1452 pEnd->write1=0x01; /* first end ccw is now active */
1453 /* set up Tic CCWs */
1454 p_last_ccw->w_TIC_1.cda=
1455 (__u32)__pa(&pEnd->write1_nop1);
1456 pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1457 pEnd->write1_nop2.flags =
1458 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1459 pEnd->write1_nop2.cda=0;
1460 pEnd->write1_nop2.count=1;
1461 } /* end if if (pEnd->write1) */
1463 if (privptr->p_write_active_first==NULL ) {
1464 privptr->p_write_active_first=p_first_ccw;
1465 privptr->p_write_active_last=p_last_ccw;
1467 else {
1468 /* set up Tic CCWs */
1470 tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
1471 tempCCW.count=0;
1472 tempCCW.flags=0;
1473 tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
1475 if (pEnd->write1) {
1478 * first set of ending CCW's is chained to the new write
1479 * chain, so the second set is chained to the active chain
1480 * Therefore modify the second set to point to the new write chain.
1481 * make sure we update the CCW atomically
1482 * so channel does not fetch it when it's only half done
1484 memcpy( &pEnd->write2_nop2, &tempCCW ,
1485 sizeof(struct ccw1));
1486 privptr->p_write_active_last->w_TIC_1.cda=
1487 (__u32)__pa(&p_first_ccw->write);
1489 else {
1491 /*make sure we update the CCW atomically
1492 *so channel does not fetch it when it's only half done
1494 memcpy(&pEnd->write1_nop2, &tempCCW ,
1495 sizeof(struct ccw1));
1496 privptr->p_write_active_last->w_TIC_1.cda=
1497 (__u32)__pa(&p_first_ccw->write);
1499 } /* end if if (pEnd->write1) */
1501 privptr->p_write_active_last->next=p_first_ccw;
1502 privptr->p_write_active_last=p_last_ccw;
1505 } /* endif (p_first_ccw!=NULL) */
1506 dev_kfree_skb_any(skb);
1507 claw_strt_out_IO(dev );
1508 /* if write free count is zero , set NOBUFFER */
1509 if (privptr->write_free_count==0) {
1510 claw_setbit_busy(TB_NOBUFFER,dev);
1512 Done2:
1513 claw_clearbit_busy(TB_TX,dev);
1514 Done:
1515 return(rc);
1516 } /* end of claw_hw_tx */
1518 /*-------------------------------------------------------------------*
1520 * init_ccw_bk *
1522 *--------------------------------------------------------------------*/
1524 static int
1525 init_ccw_bk(struct net_device *dev)
1528 __u32 ccw_blocks_required;
1529 __u32 ccw_blocks_perpage;
1530 __u32 ccw_pages_required;
1531 __u32 claw_reads_perpage=1;
1532 __u32 claw_read_pages;
1533 __u32 claw_writes_perpage=1;
1534 __u32 claw_write_pages;
1535 void *p_buff=NULL;
1536 struct ccwbk*p_free_chain;
1537 struct ccwbk*p_buf;
1538 struct ccwbk*p_last_CCWB;
1539 struct ccwbk*p_first_CCWB;
1540 struct endccw *p_endccw=NULL;
1541 addr_t real_address;
1542 struct claw_privbk *privptr = dev->ml_priv;
1543 struct clawh *pClawH=NULL;
1544 addr_t real_TIC_address;
1545 int i,j;
1546 CLAW_DBF_TEXT(4, trace, "init_ccw");
1548 /* initialize statistics field */
1549 privptr->active_link_ID=0;
1550 /* initialize ccwbk pointers */
1551 privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
1552 privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
1553 privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
1554 privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
1555 privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
1556 privptr->p_end_ccw=NULL; /* pointer to ending ccw */
1557 privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
1558 privptr->buffs_alloc = 0;
1559 memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
1560 memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
1561 /* initialize free write ccwbk counter */
1562 privptr->write_free_count=0; /* number of free bufs on write chain */
1563 p_last_CCWB = NULL;
1564 p_first_CCWB= NULL;
1566 * We need 1 CCW block for each read buffer, 1 for each
1567 * write buffer, plus 1 for ClawSignalBlock
1569 ccw_blocks_required =
1570 privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
1572 * compute number of CCW blocks that will fit in a page
1574 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
1575 ccw_pages_required=
1576 DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
1579 * read and write sizes are set by two constants in claw.h,
1580 * 4k and 32k. Unpacked values other than 4k are not going to
1581 * provide good performance. With packing support, 32k
1582 * buffers are used.
1584 if (privptr->p_env->read_size < PAGE_SIZE) {
1585 claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
1586 claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
1587 claw_reads_perpage);
1589 else { /* > or equal */
1590 privptr->p_buff_pages_perread =
1591 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1592 claw_read_pages = privptr->p_env->read_buffers *
1593 privptr->p_buff_pages_perread;
1595 if (privptr->p_env->write_size < PAGE_SIZE) {
1596 claw_writes_perpage =
1597 PAGE_SIZE / privptr->p_env->write_size;
1598 claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
1599 claw_writes_perpage);
1602 else { /* > or equal */
1603 privptr->p_buff_pages_perwrite =
1604 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1605 claw_write_pages = privptr->p_env->write_buffers *
1606 privptr->p_buff_pages_perwrite;
1609 * allocate ccw_pages_required
1611 if (privptr->p_buff_ccw==NULL) {
1612 privptr->p_buff_ccw=
1613 (void *)__get_free_pages(__GFP_DMA,
1614 (int)pages_to_order_of_mag(ccw_pages_required ));
1615 if (privptr->p_buff_ccw==NULL) {
1616 return -ENOMEM;
1618 privptr->p_buff_ccw_num=ccw_pages_required;
1620 memset(privptr->p_buff_ccw, 0x00,
1621 privptr->p_buff_ccw_num * PAGE_SIZE);
1624 * obtain ending ccw block address
1627 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
1628 real_address = (__u32)__pa(privptr->p_end_ccw);
1629 /* Initialize ending CCW block */
1630 p_endccw=privptr->p_end_ccw;
1631 p_endccw->real=real_address;
1632 p_endccw->write1=0x00;
1633 p_endccw->read1=0x00;
1635 /* write1_nop1 */
1636 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1637 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1638 p_endccw->write1_nop1.count = 1;
1639 p_endccw->write1_nop1.cda = 0;
1641 /* write1_nop2 */
1642 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1643 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1644 p_endccw->write1_nop2.count = 1;
1645 p_endccw->write1_nop2.cda = 0;
1647 /* write2_nop1 */
1648 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1649 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1650 p_endccw->write2_nop1.count = 1;
1651 p_endccw->write2_nop1.cda = 0;
1653 /* write2_nop2 */
1654 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1655 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1656 p_endccw->write2_nop2.count = 1;
1657 p_endccw->write2_nop2.cda = 0;
1659 /* read1_nop1 */
1660 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1661 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1662 p_endccw->read1_nop1.count = 1;
1663 p_endccw->read1_nop1.cda = 0;
1665 /* read1_nop2 */
1666 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1667 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1668 p_endccw->read1_nop2.count = 1;
1669 p_endccw->read1_nop2.cda = 0;
1671 /* read2_nop1 */
1672 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1673 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1674 p_endccw->read2_nop1.count = 1;
1675 p_endccw->read2_nop1.cda = 0;
1677 /* read2_nop2 */
1678 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1679 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1680 p_endccw->read2_nop2.count = 1;
1681 p_endccw->read2_nop2.cda = 0;
1684 * Build a chain of CCWs
1687 p_buff=privptr->p_buff_ccw;
1689 p_free_chain=NULL;
1690 for (i=0 ; i < ccw_pages_required; i++ ) {
1691 real_address = (__u32)__pa(p_buff);
1692 p_buf=p_buff;
1693 for (j=0 ; j < ccw_blocks_perpage ; j++) {
1694 p_buf->next = p_free_chain;
1695 p_free_chain = p_buf;
1696 p_buf->real=(__u32)__pa(p_buf);
1697 ++p_buf;
1699 p_buff+=PAGE_SIZE;
1702 * Initialize ClawSignalBlock
1705 if (privptr->p_claw_signal_blk==NULL) {
1706 privptr->p_claw_signal_blk=p_free_chain;
1707 p_free_chain=p_free_chain->next;
1708 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
1709 pClawH->length=0xffff;
1710 pClawH->opcode=0xff;
1711 pClawH->flag=CLAW_BUSY;
1715 * allocate write_pages_required and add to free chain
1717 if (privptr->p_buff_write==NULL) {
1718 if (privptr->p_env->write_size < PAGE_SIZE) {
1719 privptr->p_buff_write=
1720 (void *)__get_free_pages(__GFP_DMA,
1721 (int)pages_to_order_of_mag(claw_write_pages ));
1722 if (privptr->p_buff_write==NULL) {
1723 privptr->p_buff_ccw=NULL;
1724 return -ENOMEM;
1727 * Build CLAW write free chain
1731 memset(privptr->p_buff_write, 0x00,
1732 ccw_pages_required * PAGE_SIZE);
1733 privptr->p_write_free_chain=NULL;
1735 p_buff=privptr->p_buff_write;
1737 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
1738 p_buf = p_free_chain; /* get a CCW */
1739 p_free_chain = p_buf->next;
1740 p_buf->next =privptr->p_write_free_chain;
1741 privptr->p_write_free_chain = p_buf;
1742 p_buf-> p_buffer = (struct clawbuf *)p_buff;
1743 p_buf-> write.cda = (__u32)__pa(p_buff);
1744 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1745 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1746 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1747 p_buf-> w_read_FF.count = 1;
1748 p_buf-> w_read_FF.cda =
1749 (__u32)__pa(&p_buf-> header.flag);
1750 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1751 p_buf-> w_TIC_1.flags = 0;
1752 p_buf-> w_TIC_1.count = 0;
1754 if (((unsigned long)p_buff +
1755 privptr->p_env->write_size) >=
1756 ((unsigned long)(p_buff+2*
1757 (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
1758 p_buff = p_buff+privptr->p_env->write_size;
1762 else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */
1764 privptr->p_write_free_chain=NULL;
1765 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
1766 p_buff=(void *)__get_free_pages(__GFP_DMA,
1767 (int)pages_to_order_of_mag(
1768 privptr->p_buff_pages_perwrite) );
1769 if (p_buff==NULL) {
1770 free_pages((unsigned long)privptr->p_buff_ccw,
1771 (int)pages_to_order_of_mag(
1772 privptr->p_buff_ccw_num));
1773 privptr->p_buff_ccw=NULL;
1774 p_buf=privptr->p_buff_write;
1775 while (p_buf!=NULL) {
1776 free_pages((unsigned long)
1777 p_buf->p_buffer,
1778 (int)pages_to_order_of_mag(
1779 privptr->p_buff_pages_perwrite));
1780 p_buf=p_buf->next;
1782 return -ENOMEM;
1783 } /* Error on get_pages */
1784 memset(p_buff, 0x00, privptr->p_env->write_size );
1785 p_buf = p_free_chain;
1786 p_free_chain = p_buf->next;
1787 p_buf->next = privptr->p_write_free_chain;
1788 privptr->p_write_free_chain = p_buf;
1789 privptr->p_buff_write = p_buf;
1790 p_buf->p_buffer=(struct clawbuf *)p_buff;
1791 p_buf-> write.cda = (__u32)__pa(p_buff);
1792 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1793 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1794 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1795 p_buf-> w_read_FF.count = 1;
1796 p_buf-> w_read_FF.cda =
1797 (__u32)__pa(&p_buf-> header.flag);
1798 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1799 p_buf-> w_TIC_1.flags = 0;
1800 p_buf-> w_TIC_1.count = 0;
1801 } /* for all write_buffers */
1803 } /* else buffers are PAGE_SIZE or bigger */
1806 privptr->p_buff_write_num=claw_write_pages;
1807 privptr->write_free_count=privptr->p_env->write_buffers;
1811 * allocate read_pages_required and chain to free chain
1813 if (privptr->p_buff_read==NULL) {
1814 if (privptr->p_env->read_size < PAGE_SIZE) {
1815 privptr->p_buff_read=
1816 (void *)__get_free_pages(__GFP_DMA,
1817 (int)pages_to_order_of_mag(claw_read_pages) );
1818 if (privptr->p_buff_read==NULL) {
1819 free_pages((unsigned long)privptr->p_buff_ccw,
1820 (int)pages_to_order_of_mag(
1821 privptr->p_buff_ccw_num));
1822 /* free the write pages - size is < page size */
1823 free_pages((unsigned long)privptr->p_buff_write,
1824 (int)pages_to_order_of_mag(
1825 privptr->p_buff_write_num));
1826 privptr->p_buff_ccw=NULL;
1827 privptr->p_buff_write=NULL;
1828 return -ENOMEM;
1830 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
1831 privptr->p_buff_read_num=claw_read_pages;
1833 /* Build CLAW read free chain */
1836 p_buff=privptr->p_buff_read;
1837 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1838 p_buf = p_free_chain;
1839 p_free_chain = p_buf->next;
1841 if (p_last_CCWB==NULL) {
1842 p_buf->next=NULL;
1843 real_TIC_address=0;
1844 p_last_CCWB=p_buf;
1846 else {
1847 p_buf->next=p_first_CCWB;
1848 real_TIC_address=
1849 (__u32)__pa(&p_first_CCWB -> read );
1852 p_first_CCWB=p_buf;
1854 p_buf->p_buffer=(struct clawbuf *)p_buff;
1855 /* initialize read command */
1856 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1857 p_buf-> read.cda = (__u32)__pa(p_buff);
1858 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1859 p_buf-> read.count = privptr->p_env->read_size;
1861 /* initialize read_h command */
1862 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1863 p_buf-> read_h.cda =
1864 (__u32)__pa(&(p_buf->header));
1865 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1866 p_buf-> read_h.count = sizeof(struct clawh);
1868 /* initialize Signal command */
1869 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1870 p_buf-> signal.cda =
1871 (__u32)__pa(&(pClawH->flag));
1872 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1873 p_buf-> signal.count = 1;
1875 /* initialize r_TIC_1 command */
1876 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1877 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1878 p_buf-> r_TIC_1.flags = 0;
1879 p_buf-> r_TIC_1.count = 0;
1881 /* initialize r_read_FF command */
1882 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1883 p_buf-> r_read_FF.cda =
1884 (__u32)__pa(&(pClawH->flag));
1885 p_buf-> r_read_FF.flags =
1886 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1887 p_buf-> r_read_FF.count = 1;
1889 /* initialize r_TIC_2 */
1890 memcpy(&p_buf->r_TIC_2,
1891 &p_buf->r_TIC_1, sizeof(struct ccw1));
1893 /* initialize Header */
1894 p_buf->header.length=0xffff;
1895 p_buf->header.opcode=0xff;
1896 p_buf->header.flag=CLAW_PENDING;
1898 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
1899 ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
1901 & PAGE_MASK)) {
1902 p_buff= p_buff+privptr->p_env->read_size;
1904 else {
1905 p_buff=
1906 (void *)((unsigned long)
1907 (p_buff+2*(privptr->p_env->read_size)-1)
1908 & PAGE_MASK) ;
1910 } /* for read_buffers */
1911 } /* read_size < PAGE_SIZE */
1912 else { /* read Size >= PAGE_SIZE */
1913 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1914 p_buff = (void *)__get_free_pages(__GFP_DMA,
1915 (int)pages_to_order_of_mag(
1916 privptr->p_buff_pages_perread));
1917 if (p_buff==NULL) {
1918 free_pages((unsigned long)privptr->p_buff_ccw,
1919 (int)pages_to_order_of_mag(privptr->
1920 p_buff_ccw_num));
1921 /* free the write pages */
1922 p_buf=privptr->p_buff_write;
1923 while (p_buf!=NULL) {
1924 free_pages(
1925 (unsigned long)p_buf->p_buffer,
1926 (int)pages_to_order_of_mag(
1927 privptr->p_buff_pages_perwrite));
1928 p_buf=p_buf->next;
1930 /* free any read pages already alloc */
1931 p_buf=privptr->p_buff_read;
1932 while (p_buf!=NULL) {
1933 free_pages(
1934 (unsigned long)p_buf->p_buffer,
1935 (int)pages_to_order_of_mag(
1936 privptr->p_buff_pages_perread));
1937 p_buf=p_buf->next;
1939 privptr->p_buff_ccw=NULL;
1940 privptr->p_buff_write=NULL;
1941 return -ENOMEM;
1943 memset(p_buff, 0x00, privptr->p_env->read_size);
1944 p_buf = p_free_chain;
1945 privptr->p_buff_read = p_buf;
1946 p_free_chain = p_buf->next;
1948 if (p_last_CCWB==NULL) {
1949 p_buf->next=NULL;
1950 real_TIC_address=0;
1951 p_last_CCWB=p_buf;
1953 else {
1954 p_buf->next=p_first_CCWB;
1955 real_TIC_address=
1956 (addr_t)__pa(
1957 &p_first_CCWB -> read );
1960 p_first_CCWB=p_buf;
1961 /* save buff address */
1962 p_buf->p_buffer=(struct clawbuf *)p_buff;
1963 /* initialize read command */
1964 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1965 p_buf-> read.cda = (__u32)__pa(p_buff);
1966 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1967 p_buf-> read.count = privptr->p_env->read_size;
1969 /* initialize read_h command */
1970 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1971 p_buf-> read_h.cda =
1972 (__u32)__pa(&(p_buf->header));
1973 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1974 p_buf-> read_h.count = sizeof(struct clawh);
1976 /* initialize Signal command */
1977 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1978 p_buf-> signal.cda =
1979 (__u32)__pa(&(pClawH->flag));
1980 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1981 p_buf-> signal.count = 1;
1983 /* initialize r_TIC_1 command */
1984 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1985 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1986 p_buf-> r_TIC_1.flags = 0;
1987 p_buf-> r_TIC_1.count = 0;
1989 /* initialize r_read_FF command */
1990 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1991 p_buf-> r_read_FF.cda =
1992 (__u32)__pa(&(pClawH->flag));
1993 p_buf-> r_read_FF.flags =
1994 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1995 p_buf-> r_read_FF.count = 1;
1997 /* initialize r_TIC_2 */
1998 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
1999 sizeof(struct ccw1));
2001 /* initialize Header */
2002 p_buf->header.length=0xffff;
2003 p_buf->header.opcode=0xff;
2004 p_buf->header.flag=CLAW_PENDING;
2006 } /* For read_buffers */
2007 } /* read_size >= PAGE_SIZE */
2008 } /* p_buff_read == NULL */
2009 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
2010 privptr->buffs_alloc = 1;
2012 return 0;
2013 } /* end of init_ccw_bk */
2015 /*-------------------------------------------------------------------*
2017 * probe_error *
2019 *--------------------------------------------------------------------*/
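/* Clean up after a failed probe: detach the private data from the ccwgroup
 * device and free the environment, the MTC envelope and the private block. */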
2021 static void
2022 probe_error( struct ccwgroup_device *cgdev)
2024 struct claw_privbk *privptr;
2026 CLAW_DBF_TEXT(4, trace, "proberr");
2027 privptr = dev_get_drvdata(&cgdev->dev);
2028 if (privptr != NULL) {
2029 dev_set_drvdata(&cgdev->dev, NULL);
2030 kfree(privptr->p_env);
2031 kfree(privptr->p_mtc_envelope);
2032 kfree(privptr);
2034 } /* probe_error */
2036 /*-------------------------------------------------------------------*
2037 * claw_process_control *
2040 *--------------------------------------------------------------------*/
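/* Handle an inbound CLAW control packet: validate system parameters, answer
 * connection requests/responses/confirms, and process disconnects and error
 * reports, advancing the link handshake state kept in the private block. */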
2042 static int
2043 claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2046 struct clawbuf *p_buf;
2047 struct clawctl ctlbk;
2048 struct clawctl *p_ctlbk;
2049 char temp_host_name[8];
2050 char temp_ws_name[8];
2051 struct claw_privbk *privptr;
2052 struct claw_env *p_env;
2053 struct sysval *p_sysval;
2054 struct conncmd *p_connect=NULL;
2055 int rc;
2056 struct chbk *p_ch = NULL;
2057 struct device *tdev;
2058 CLAW_DBF_TEXT(2, setup, "clw_cntl");
2059 udelay(1000); /* Wait a ms for the control packets to
2060 * catch up to each other */
2061 privptr = dev->ml_priv;
2062 p_env=privptr->p_env;
2063 tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
2064 memcpy( &temp_host_name, p_env->host_name, 8);
2065 memcpy( &temp_ws_name, p_env->adapter_name , 8);
2066 dev_info(tdev, "%s: CLAW device %.8s: "
2067 "Received Control Packet\n",
2068 dev->name, temp_ws_name);
2069 if (privptr->release_pend==1) {
2070 return 0;
2072 p_buf=p_ccw->p_buffer;
2073 p_ctlbk=&ctlbk;
2074 if (p_env->packing == DO_PACKED) { /* packing in progress?*/
2075 memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
2076 } else {
2077 memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
2079 switch (p_ctlbk->command)
2081 case SYSTEM_VALIDATE_REQUEST:
2082 if (p_ctlbk->version != CLAW_VERSION_ID) {
2083 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2084 CLAW_RC_WRONG_VERSION);
2085 dev_warn(tdev, "The communication peer of %s"
2086 " uses an incorrect API version %d\n",
2087 dev->name, p_ctlbk->version);
2089 p_sysval = (struct sysval *)&(p_ctlbk->data);
2090 dev_info(tdev, "%s: Recv Sys Validate Request: "
2091 "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
2092 "Host name=%.8s\n",
2093 dev->name, p_ctlbk->version,
2094 p_ctlbk->linkid,
2095 p_ctlbk->correlator,
2096 p_sysval->WS_name,
2097 p_sysval->host_name);
2098 if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
2099 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2100 CLAW_RC_NAME_MISMATCH);
2101 CLAW_DBF_TEXT(2, setup, "HSTBAD");
2102 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
2103 CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
2104 dev_warn(tdev,
2105 "Host name %s for %s does not match the"
2106 " remote adapter name %s\n",
2107 p_sysval->host_name,
2108 dev->name,
2109 temp_host_name);
2111 if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
2112 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2113 CLAW_RC_NAME_MISMATCH);
2114 CLAW_DBF_TEXT(2, setup, "WSNBAD");
2115 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
2116 CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
2117 dev_warn(tdev, "Adapter name %s for %s does not match"
2118 " the remote host name %s\n",
2119 p_sysval->WS_name,
2120 dev->name,
2121 temp_ws_name);
2123 if ((p_sysval->write_frame_size < p_env->write_size) &&
2124 (p_env->packing == 0)) {
2125 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2126 CLAW_RC_HOST_RCV_TOO_SMALL);
2127 dev_warn(tdev,
2128 "The local write buffer is smaller than the"
2129 " remote read buffer\n");
2130 CLAW_DBF_TEXT(2, setup, "wrtszbad");
2132 if ((p_sysval->read_frame_size < p_env->read_size) &&
2133 (p_env->packing == 0)) {
2134 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2135 CLAW_RC_HOST_RCV_TOO_SMALL);
2136 dev_warn(tdev,
2137 "The local read buffer is smaller than the"
2138 " remote write buffer\n");
2139 CLAW_DBF_TEXT(2, setup, "rdsizbad");
2141 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
2142 dev_info(tdev,
2143 "CLAW device %.8s: System validate"
2144 " completed.\n", temp_ws_name);
2145 dev_info(tdev,
2146 "%s: sys Validate Rsize:%d Wsize:%d\n",
2147 dev->name, p_sysval->read_frame_size,
2148 p_sysval->write_frame_size);
2149 privptr->system_validate_comp = 1;
2150 if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
2151 p_env->packing = PACKING_ASK;
2152 claw_strt_conn_req(dev);
2153 break;
2154 case SYSTEM_VALIDATE_RESPONSE:
2155 p_sysval = (struct sysval *)&(p_ctlbk->data);
2156 dev_info(tdev,
2157 "Settings for %s validated (version=%d, "
2158 "remote device=%d, rc=%d, adapter name=%.8s, "
2159 "host name=%.8s)\n",
2160 dev->name,
2161 p_ctlbk->version,
2162 p_ctlbk->correlator,
2163 p_ctlbk->rc,
2164 p_sysval->WS_name,
2165 p_sysval->host_name);
2166 switch (p_ctlbk->rc) {
2167 case 0:
2168 dev_info(tdev, "%s: CLAW device "
2169 "%.8s: System validate completed.\n",
2170 dev->name, temp_ws_name);
2171 if (privptr->system_validate_comp == 0)
2172 claw_strt_conn_req(dev);
2173 privptr->system_validate_comp = 1;
2174 break;
2175 case CLAW_RC_NAME_MISMATCH:
2176 dev_warn(tdev, "Validating %s failed because of"
2177 " a host or adapter name mismatch\n",
2178 dev->name);
2179 break;
2180 case CLAW_RC_WRONG_VERSION:
2181 dev_warn(tdev, "Validating %s failed because of a"
2182 " version conflict\n",
2183 dev->name);
2184 break;
2185 case CLAW_RC_HOST_RCV_TOO_SMALL:
2186 dev_warn(tdev, "Validating %s failed because of a"
2187 " frame size conflict\n",
2188 dev->name);
2189 break;
2190 default:
2191 dev_warn(tdev, "The communication peer of %s rejected"
2192 " the connection\n",
2193 dev->name);
2194 break;
2196 break;
2198 case CONNECTION_REQUEST:
2199 p_connect = (struct conncmd *)&(p_ctlbk->data);
2200 dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2201 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2202 dev->name,
2203 p_ctlbk->version,
2204 p_ctlbk->linkid,
2205 p_ctlbk->correlator,
2206 p_connect->host_name,
2207 p_connect->WS_name);
2208 if (privptr->active_link_ID != 0) {
2209 claw_snd_disc(dev, p_ctlbk);
2210 dev_info(tdev, "%s rejected a connection request"
2211 " because it is already active\n",
2212 dev->name);
2214 if (p_ctlbk->linkid != 1) {
2215 claw_snd_disc(dev, p_ctlbk);
2216 dev_info(tdev, "%s rejected a request to open multiple"
2217 " connections\n",
2218 dev->name);
2220 rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
2221 if (rc != 0) {
2222 claw_snd_disc(dev, p_ctlbk);
2223 dev_info(tdev, "%s rejected a connection request"
2224 " because of a type mismatch\n",
2225 dev->name);
2227 claw_send_control(dev,
2228 CONNECTION_CONFIRM, p_ctlbk->linkid,
2229 p_ctlbk->correlator,
2230 0, p_connect->host_name,
2231 p_connect->WS_name);
2232 if (p_env->packing == PACKING_ASK) {
2233 p_env->packing = PACK_SEND;
2234 claw_snd_conn_req(dev, 0);
2236 dev_info(tdev, "%s: CLAW device %.8s: Connection "
2237 "completed link_id=%d.\n",
2238 dev->name, temp_ws_name,
2239 p_ctlbk->linkid);
2240 privptr->active_link_ID = p_ctlbk->linkid;
2241 p_ch = &privptr->channel[WRITE_CHANNEL];
2242 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2243 break;
2244 case CONNECTION_RESPONSE:
2245 p_connect = (struct conncmd *)&(p_ctlbk->data);
2246 dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2247 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
2248 dev->name,
2249 p_ctlbk->version,
2250 p_ctlbk->linkid,
2251 p_ctlbk->correlator,
2252 p_ctlbk->rc,
2253 p_connect->host_name,
2254 p_connect->WS_name);
2256 if (p_ctlbk->rc != 0) {
2257 dev_warn(tdev, "The communication peer of %s rejected"
2258 " a connection request\n",
2259 dev->name);
2260 return 1;
2262 rc = find_link(dev,
2263 p_connect->host_name, p_connect->WS_name);
2264 if (rc != 0) {
2265 claw_snd_disc(dev, p_ctlbk);
2266 dev_warn(tdev, "The communication peer of %s"
2267 " rejected a connection "
2268 "request because of a type mismatch\n",
2269 dev->name);
2271 /* keep the link ID negative until CONNECTION_CONFIRM arrives */
2272 privptr->active_link_ID = -(p_ctlbk->linkid);
2273 break;
2274 case CONNECTION_CONFIRM:
2275 p_connect = (struct conncmd *)&(p_ctlbk->data);
2276 dev_info(tdev,
2277 "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
2278 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
2279 dev->name,
2280 p_ctlbk->version,
2281 p_ctlbk->linkid,
2282 p_ctlbk->correlator,
2283 p_connect->host_name,
2284 p_connect->WS_name);
2285 if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
2286 privptr->active_link_ID = p_ctlbk->linkid;
2287 if (p_env->packing > PACKING_ASK) {
2288 dev_info(tdev,
2289 "%s: Confirmed Now packing\n", dev->name);
2290 p_env->packing = DO_PACKED;
2292 p_ch = &privptr->channel[WRITE_CHANNEL];
2293 wake_up(&p_ch->wait);
2294 } else {
2295 dev_warn(tdev, "Activating %s failed because of"
2296 " an incorrect link ID=%d\n",
2297 dev->name, p_ctlbk->linkid);
2298 claw_snd_disc(dev, p_ctlbk);
2300 break;
2301 case DISCONNECT:
2302 dev_info(tdev, "%s: Disconnect: "
2303 "Vers=%d,link_id=%d,Corr=%d\n",
2304 dev->name, p_ctlbk->version,
2305 p_ctlbk->linkid, p_ctlbk->correlator);
2306 if ((p_ctlbk->linkid == 2) &&
2307 (p_env->packing == PACK_SEND)) {
2308 privptr->active_link_ID = 1;
2309 p_env->packing = DO_PACKED;
2310 } else
2311 privptr->active_link_ID = 0;
2312 break;
2313 case CLAW_ERROR:
2314 dev_warn(tdev, "The communication peer of %s failed\n",
2315 dev->name);
2316 break;
2317 default:
2318 dev_warn(tdev, "The communication peer of %s sent"
2319 " an unknown command code\n",
2320 dev->name);
2321 break;
2324 return 0;
2325 } /* end of claw_process_control */
2328 /*-------------------------------------------------------------------*
2329 * claw_send_control *
2331 *--------------------------------------------------------------------*/
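/* Build a control record of the given type in the private control block,
 * copy it into a fresh sk_buff and hand it to claw_hw_tx() on the write
 * channel. */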
2333 static int
2334 claw_send_control(struct net_device *dev, __u8 type, __u8 link,
2335 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
2337 struct claw_privbk *privptr;
2338 struct clawctl *p_ctl;
2339 struct sysval *p_sysval;
2340 struct conncmd *p_connect;
2341 struct sk_buff *skb;
2343 CLAW_DBF_TEXT(2, setup, "sndcntl");
2344 privptr = dev->ml_priv;
2345 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2347 p_ctl->command=type;
2348 p_ctl->version=CLAW_VERSION_ID;
2349 p_ctl->linkid=link;
2350 p_ctl->correlator=correlator;
2351 p_ctl->rc=rc;
2353 p_sysval=(struct sysval *)&p_ctl->data;
2354 p_connect=(struct conncmd *)&p_ctl->data;
2356 switch (p_ctl->command) {
2357 case SYSTEM_VALIDATE_REQUEST:
2358 case SYSTEM_VALIDATE_RESPONSE:
2359 memcpy(&p_sysval->host_name, local_name, 8);
2360 memcpy(&p_sysval->WS_name, remote_name, 8);
2361 if (privptr->p_env->packing > 0) {
2362 p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
2363 p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
2364 } else {
2365 /* how big is the biggest group of packets */
2366 p_sysval->read_frame_size =
2367 privptr->p_env->read_size;
2368 p_sysval->write_frame_size =
2369 privptr->p_env->write_size;
2371 memset(&p_sysval->reserved, 0x00, 4);
2372 break;
2373 case CONNECTION_REQUEST:
2374 case CONNECTION_RESPONSE:
2375 case CONNECTION_CONFIRM:
2376 case DISCONNECT:
2377 memcpy(&p_sysval->host_name, local_name, 8);
2378 memcpy(&p_sysval->WS_name, remote_name, 8);
2379 if (privptr->p_env->packing > 0) {
2380 /* How big is the biggest packet */
2381 p_connect->reserved1[0]=CLAW_FRAME_SIZE;
2382 p_connect->reserved1[1]=CLAW_FRAME_SIZE;
2383 } else {
2384 memset(&p_connect->reserved1, 0x00, 4);
2385 memset(&p_connect->reserved2, 0x00, 4);
2387 break;
2388 default:
2389 break;
2392 /* write Control Record to the device */
2395 skb = dev_alloc_skb(sizeof(struct clawctl));
2396 if (!skb) {
2397 return -ENOMEM;
2399 memcpy(skb_put(skb, sizeof(struct clawctl)),
2400 p_ctl, sizeof(struct clawctl));
2401 if (privptr->p_env->packing >= PACK_SEND)
2402 claw_hw_tx(skb, dev, 1);
2403 else
2404 claw_hw_tx(skb, dev, 0);
2405 return 0;
2406 } /* end of claw_send_control */
2408 /*-------------------------------------------------------------------*
2409 * claw_snd_conn_req *
2411 *--------------------------------------------------------------------*/
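/* Send a CONNECTION_REQUEST for the given link; the application name used
 * depends on whether packed mode is being negotiated. */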
2412 static int
2413 claw_snd_conn_req(struct net_device *dev, __u8 link)
2415 int rc;
2416 struct claw_privbk *privptr = dev->ml_priv;
2417 struct clawctl *p_ctl;
2419 CLAW_DBF_TEXT(2, setup, "snd_conn");
2420 rc = 1;
2421 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2422 p_ctl->linkid = link;
2423 if ( privptr->system_validate_comp==0x00 ) {
2424 return rc;
2426 if (privptr->p_env->packing == PACKING_ASK )
2427 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2428 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
2429 if (privptr->p_env->packing == PACK_SEND) {
2430 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2431 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
2433 if (privptr->p_env->packing == 0)
2434 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2435 HOST_APPL_NAME, privptr->p_env->api_type);
2436 return rc;
2438 } /* end of claw_snd_conn_req */
2441 /*-------------------------------------------------------------------*
2442 * claw_snd_disc *
2444 *--------------------------------------------------------------------*/
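/* Send a DISCONNECT control record for the link identified by the received
 * control block. */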
2446 static int
2447 claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
2449 int rc;
2450 struct conncmd * p_connect;
2452 CLAW_DBF_TEXT(2, setup, "snd_dsc");
2453 p_connect=(struct conncmd *)&p_ctl->data;
2455 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
2456 p_ctl->correlator, 0,
2457 p_connect->host_name, p_connect->WS_name);
2458 return rc;
2459 } /* end of claw_snd_disc */
2462 /*-------------------------------------------------------------------*
2463 * claw_snd_sys_validate_rsp *
2465 *--------------------------------------------------------------------*/
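/* Send a SYSTEM_VALIDATE_RESPONSE carrying the supplied return code together
 * with the local host and adapter names. */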
2467 static int
2468 claw_snd_sys_validate_rsp(struct net_device *dev,
2469 struct clawctl *p_ctl, __u32 return_code)
2471 struct claw_env * p_env;
2472 struct claw_privbk *privptr;
2473 int rc;
2475 CLAW_DBF_TEXT(2, setup, "chkresp");
2476 privptr = dev->ml_priv;
2477 p_env=privptr->p_env;
2478 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
2479 p_ctl->linkid,
2480 p_ctl->correlator,
2481 return_code,
2482 p_env->host_name,
2483 p_env->adapter_name );
2484 return rc;
2485 } /* end of claw_snd_sys_validate_rsp */
2487 /*-------------------------------------------------------------------*
2488 * claw_strt_conn_req *
2490 *--------------------------------------------------------------------*/
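/* Start connection setup by requesting link id 1. */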
2492 static int
2493 claw_strt_conn_req(struct net_device *dev )
2495 int rc;
2497 CLAW_DBF_TEXT(2, setup, "conn_req");
2498 rc=claw_snd_conn_req(dev, 1);
2499 return rc;
2500 } /* end of claw_strt_conn_req */
2504 /*-------------------------------------------------------------------*
2505 * claw_stats *
2506 *-------------------------------------------------------------------*/
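/* Return the interface statistics kept in the private block. */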
2508 static struct
2509 net_device_stats *claw_stats(struct net_device *dev)
2511 struct claw_privbk *privptr;
2513 CLAW_DBF_TEXT(4, trace, "stats");
2514 privptr = dev->ml_priv;
2515 return &privptr->stats;
2516 } /* end of claw_stats */
2519 /*-------------------------------------------------------------------*
2520 * unpack_read *
2522 *--------------------------------------------------------------------*/
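/* Walk the completed read CCW blocks: reassemble multi-frame (MTC) messages,
 * unpack packed buffers, pass control packets to claw_process_control(),
 * hand finished frames to the stack as IP packets and return the CCW blocks
 * to the free chain. */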
2523 static void
2524 unpack_read(struct net_device *dev )
2526 struct sk_buff *skb;
2527 struct claw_privbk *privptr;
2528 struct claw_env *p_env;
2529 struct ccwbk *p_this_ccw;
2530 struct ccwbk *p_first_ccw;
2531 struct ccwbk *p_last_ccw;
2532 struct clawph *p_packh;
2533 void *p_packd;
2534 struct clawctl *p_ctlrec=NULL;
2535 struct device *p_dev;
2537 __u32 len_of_data;
2538 __u32 pack_off;
2539 __u8 link_num;
2540 __u8 mtc_this_frm=0;
2541 __u32 bytes_to_mov;
2542 int i=0;
2543 int p=0;
2545 CLAW_DBF_TEXT(4, trace, "unpkread");
2546 p_first_ccw=NULL;
2547 p_last_ccw=NULL;
2548 p_packh=NULL;
2549 p_packd=NULL;
2550 privptr = dev->ml_priv;
2552 p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
2553 p_env = privptr->p_env;
2554 p_this_ccw=privptr->p_read_active_first;
2555 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
2556 pack_off = 0;
2557 p = 0;
2558 p_this_ccw->header.flag=CLAW_PENDING;
2559 privptr->p_read_active_first=p_this_ccw->next;
2560 p_this_ccw->next=NULL;
2561 p_packh = (struct clawph *)p_this_ccw->p_buffer;
2562 if ((p_env->packing == PACK_SEND) &&
2563 (p_packh->len == 32) &&
2564 (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
2565 p_packh++; /* peek past pack header */
2566 p_ctlrec = (struct clawctl *)p_packh;
2567 p_packh--; /* un peek */
2568 if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
2569 (p_ctlrec->command == CONNECTION_CONFIRM))
2570 p_env->packing = DO_PACKED;
2572 if (p_env->packing == DO_PACKED)
2573 link_num=p_packh->link_num;
2574 else
2575 link_num=p_this_ccw->header.opcode / 8;
2576 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
2577 mtc_this_frm=1;
2578 if (p_this_ccw->header.length!=
2579 privptr->p_env->read_size ) {
2580 dev_warn(p_dev,
2581 "The communication peer of %s"
2582 " sent a faulty"
2583 " frame of length %02x\n",
2584 dev->name, p_this_ccw->header.length);
2588 if (privptr->mtc_skipping) {
2590 /* We're in the mode of skipping past a
2591 * multi-frame message
2592 * that we can't process for some reason or other.
2593 * The first frame without the More-To-Come flag is
2594 * the last frame of the skipped message. */
2596 /* in case of More-To-Come not set in this frame */
2597 if (mtc_this_frm==0) {
2598 privptr->mtc_skipping=0; /* Ok, the end */
2599 privptr->mtc_logical_link=-1;
2601 goto NextFrame;
2604 if (link_num==0) {
2605 claw_process_control(dev, p_this_ccw);
2606 CLAW_DBF_TEXT(4, trace, "UnpkCntl");
2607 goto NextFrame;
2609 unpack_next:
2610 if (p_env->packing == DO_PACKED) {
2611 if (pack_off > p_env->read_size)
2612 goto NextFrame;
2613 p_packd = p_this_ccw->p_buffer+pack_off;
2614 p_packh = (struct clawph *) p_packd;
2615 if ((p_packh->len == 0) || /* done with this frame? */
2616 (p_packh->flag != 0))
2617 goto NextFrame;
2618 bytes_to_mov = p_packh->len;
2619 pack_off += bytes_to_mov+sizeof(struct clawph);
2620 p++;
2621 } else {
2622 bytes_to_mov=p_this_ccw->header.length;
2624 if (privptr->mtc_logical_link<0) {
2627 /* If More-To-Come is set in this frame then we don't know
2628 * the length of the entire message, and hence have to allocate
2629 * a large buffer */
2631 /* We are starting a new envelope */
2632 privptr->mtc_offset=0;
2633 privptr->mtc_logical_link=link_num;
2636 if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
2637 /* error */
2638 privptr->stats.rx_frame_errors++;
2639 goto NextFrame;
2641 if (p_env->packing == DO_PACKED) {
2642 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2643 p_packd+sizeof(struct clawph), bytes_to_mov);
2645 } else {
2646 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2647 p_this_ccw->p_buffer, bytes_to_mov);
2649 if (mtc_this_frm==0) {
2650 len_of_data=privptr->mtc_offset+bytes_to_mov;
2651 skb=dev_alloc_skb(len_of_data);
2652 if (skb) {
2653 memcpy(skb_put(skb,len_of_data),
2654 privptr->p_mtc_envelope,
2655 len_of_data);
2656 skb->dev=dev;
2657 skb_reset_mac_header(skb);
2658 skb->protocol=htons(ETH_P_IP);
2659 skb->ip_summed=CHECKSUM_UNNECESSARY;
2660 privptr->stats.rx_packets++;
2661 privptr->stats.rx_bytes+=len_of_data;
2662 netif_rx(skb);
2664 else {
2665 dev_info(p_dev, "Allocating a buffer for"
2666 " incoming data failed\n");
2667 privptr->stats.rx_dropped++;
2669 privptr->mtc_offset=0;
2670 privptr->mtc_logical_link=-1;
2672 else {
2673 privptr->mtc_offset+=bytes_to_mov;
2675 if (p_env->packing == DO_PACKED)
2676 goto unpack_next;
2677 NextFrame:
2679 /* Remove this CCW block from the active read queue, and add it
2680 * to the queue of free blocks to be reused. */
2682 i++;
2683 p_this_ccw->header.length=0xffff;
2684 p_this_ccw->header.opcode=0xff;
2686 /* add this one to the free queue for later reuse */
2688 if (p_first_ccw==NULL) {
2689 p_first_ccw = p_this_ccw;
2691 else {
2692 p_last_ccw->next = p_this_ccw;
2694 p_last_ccw = p_this_ccw;
2696 /* chain to next block on active read queue */
2698 p_this_ccw = privptr->p_read_active_first;
2699 CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
2700 } /* end of while */
2702 /* check validity */
2704 CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
2705 add_claw_reads(dev, p_first_ccw, p_last_ccw);
2706 claw_strt_read(dev, LOCK_YES);
2707 return;
2708 } /* end of unpack_read */
2710 /*-------------------------------------------------------------------*
2711 * claw_strt_read *
2713 *--------------------------------------------------------------------*/
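/* Restart the read channel program: mark the signal block idle or busy and,
 * if no read I/O is currently active, issue ccw_device_start() for the first
 * active read CCW. */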
2714 static void
2715 claw_strt_read (struct net_device *dev, int lock )
2717 int rc = 0;
2718 __u32 parm;
2719 unsigned long saveflags = 0;
2720 struct claw_privbk *privptr = dev->ml_priv;
2721 struct ccwbk*p_ccwbk;
2722 struct chbk *p_ch;
2723 struct clawh *p_clawh;
2724 p_ch = &privptr->channel[READ_CHANNEL];
2726 CLAW_DBF_TEXT(4, trace, "StRdNter");
2727 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
2728 p_clawh->flag=CLAW_IDLE; /* 0x00 */
2730 if ((privptr->p_write_active_first!=NULL &&
2731 privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
2732 (privptr->p_read_active_first!=NULL &&
2733 privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
2734 p_clawh->flag=CLAW_BUSY; /* 0xff */
2736 if (lock==LOCK_YES) {
2737 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
2739 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2740 CLAW_DBF_TEXT(4, trace, "HotRead");
2741 p_ccwbk=privptr->p_read_active_first;
2742 parm = (unsigned long) p_ch;
2743 rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
2744 0xff, 0);
2745 if (rc != 0) {
2746 ccw_check_return_code(p_ch->cdev, rc);
2749 else {
2750 CLAW_DBF_TEXT(2, trace, "ReadAct");
2753 if (lock==LOCK_YES) {
2754 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
2756 CLAW_DBF_TEXT(4, trace, "StRdExit");
2757 return;
2758 } /* end of claw_strt_read */
2760 /*-------------------------------------------------------------------*
2761 * claw_strt_out_IO *
2763 *--------------------------------------------------------------------*/
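/* Start the write channel program for the first queued write CCW if the
 * write channel is not already active. */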
2765 static void
2766 claw_strt_out_IO( struct net_device *dev )
2768 int rc = 0;
2769 unsigned long parm;
2770 struct claw_privbk *privptr;
2771 struct chbk *p_ch;
2772 struct ccwbk *p_first_ccw;
2774 if (!dev) {
2775 return;
2777 privptr = (struct claw_privbk *)dev->ml_priv;
2778 p_ch = &privptr->channel[WRITE_CHANNEL];
2780 CLAW_DBF_TEXT(4, trace, "strt_io");
2781 p_first_ccw=privptr->p_write_active_first;
2783 if (p_ch->claw_state == CLAW_STOP)
2784 return;
2785 if (p_first_ccw == NULL) {
2786 return;
2788 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2789 parm = (unsigned long) p_ch;
2790 CLAW_DBF_TEXT(2, trace, "StWrtIO");
2791 rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
2792 0xff, 0);
2793 if (rc != 0) {
2794 ccw_check_return_code(p_ch->cdev, rc);
2797 dev->trans_start = jiffies;
2798 return;
2799 } /* end of claw_strt_out_IO */
2801 /*-------------------------------------------------------------------*
2802 * Free write buffers *
2804 *--------------------------------------------------------------------*/
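/* Move completed write CCW blocks from the active queue back to the free
 * chain, update the transmit statistics and clear the no-buffer busy bit
 * once buffers become available again. */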
2806 static void
2807 claw_free_wrt_buf( struct net_device *dev )
2810 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2811 struct ccwbk*p_this_ccw;
2812 struct ccwbk*p_next_ccw;
2814 CLAW_DBF_TEXT(4, trace, "freewrtb");
2815 /* scan the write queue to free any completed write packets */
2816 p_this_ccw=privptr->p_write_active_first;
2817 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2819 p_next_ccw = p_this_ccw->next;
2820 if (((p_next_ccw!=NULL) &&
2821 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
2822 ((p_this_ccw == privptr->p_write_active_last) &&
2823 (p_this_ccw->header.flag!=CLAW_PENDING))) {
2824 /* The next CCW is OK or this is */
2825 /* the last CCW...free it @A1A */
2826 privptr->p_write_active_first=p_this_ccw->next;
2827 p_this_ccw->header.flag=CLAW_PENDING;
2828 p_this_ccw->next=privptr->p_write_free_chain;
2829 privptr->p_write_free_chain=p_this_ccw;
2830 ++privptr->write_free_count;
2831 privptr->stats.tx_bytes+= p_this_ccw->write.count;
2832 p_this_ccw=privptr->p_write_active_first;
2833 privptr->stats.tx_packets++;
2835 else {
2836 break;
2839 if (privptr->write_free_count!=0) {
2840 claw_clearbit_busy(TB_NOBUFFER,dev);
2842 /* whole chain removed? */
2843 if (privptr->p_write_active_first==NULL) {
2844 privptr->p_write_active_last=NULL;
2846 CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
2847 return;
2850 /*-------------------------------------------------------------------*
2851 * claw free netdevice *
2853 *--------------------------------------------------------------------*/
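/* Release a CLAW net_device: close it if it is still running, detach it from
 * the read channel and, when requested, free the structure. */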
2854 static void
2855 claw_free_netdevice(struct net_device * dev, int free_dev)
2857 struct claw_privbk *privptr;
2859 CLAW_DBF_TEXT(2, setup, "free_dev");
2860 if (!dev)
2861 return;
2862 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2863 privptr = dev->ml_priv;
2864 if (dev->flags & IFF_RUNNING)
2865 claw_release(dev);
2866 if (privptr) {
2867 privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
2869 dev->ml_priv = NULL;
2870 #ifdef MODULE
2871 if (free_dev) {
2872 free_netdev(dev);
2874 #endif
2875 CLAW_DBF_TEXT(2, setup, "free_ok");
2879 /* Claw init netdevice
2880 * Initialize everything of the net device except the name and the
2881 * channel structs. */
2883 static const struct net_device_ops claw_netdev_ops = {
2884 .ndo_open = claw_open,
2885 .ndo_stop = claw_release,
2886 .ndo_get_stats = claw_stats,
2887 .ndo_start_xmit = claw_tx,
2888 .ndo_change_mtu = claw_change_mtu,
2891 static void
2892 claw_init_netdevice(struct net_device * dev)
2894 CLAW_DBF_TEXT(2, setup, "init_dev");
2895 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2896 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
2897 dev->hard_header_len = 0;
2898 dev->addr_len = 0;
2899 dev->type = ARPHRD_SLIP;
2900 dev->tx_queue_len = 1300;
2901 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2902 dev->netdev_ops = &claw_netdev_ops;
2903 CLAW_DBF_TEXT(2, setup, "initok");
2904 return;
2908 /* Init a new channel in the privptr->channel[i].
2910 * @param cdev The ccw_device to be added.
2912 * @return 0 on success, !0 on error. */
2914 static int
2915 add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
2917 struct chbk *p_ch;
2918 struct ccw_dev_id dev_id;
2920 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
2921 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
2922 p_ch = &privptr->channel[i];
2923 p_ch->cdev = cdev;
2924 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
2925 ccw_device_get_id(cdev, &dev_id);
2926 p_ch->devno = dev_id.devno;
2927 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
2928 return -ENOMEM;
2930 return 0;
2936 /* Setup an interface.
2938 * @param cgdev Device to be setup.
2940 * @returns 0 on success, !0 on failure. */
2942 static int
2943 claw_new_device(struct ccwgroup_device *cgdev)
2945 struct claw_privbk *privptr;
2946 struct claw_env *p_env;
2947 struct net_device *dev;
2948 int ret;
2949 struct ccw_dev_id dev_id;
2951 dev_info(&cgdev->dev, "add for %s\n",
2952 dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
2953 CLAW_DBF_TEXT(2, setup, "new_dev");
2954 privptr = dev_get_drvdata(&cgdev->dev);
2955 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2956 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2957 if (!privptr)
2958 return -ENODEV;
2959 p_env = privptr->p_env;
2960 ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
2961 p_env->devno[READ_CHANNEL] = dev_id.devno;
2962 ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
2963 p_env->devno[WRITE_CHANNEL] = dev_id.devno;
2964 ret = add_channel(cgdev->cdev[0],0,privptr);
2965 if (ret == 0)
2966 ret = add_channel(cgdev->cdev[1],1,privptr);
2967 if (ret != 0) {
2968 dev_warn(&cgdev->dev, "Creating a CLAW group device"
2969 " failed with error code %d\n", ret);
2970 goto out;
2972 ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
2973 if (ret != 0) {
2974 dev_warn(&cgdev->dev,
2975 "Setting the read subchannel online"
2976 " failed with error code %d\n", ret);
2977 goto out;
2979 ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
2980 if (ret != 0) {
2981 dev_warn(&cgdev->dev,
2982 "Setting the write subchannel online "
2983 "failed with error code %d\n", ret);
2984 goto out;
2986 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
2987 if (!dev) {
2988 dev_warn(&cgdev->dev,
2989 "Activating the CLAW device failed\n");
2990 goto out;
2992 dev->ml_priv = privptr;
2993 dev_set_drvdata(&cgdev->dev, privptr);
2994 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2995 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2996 /* sysfs magic */
2997 SET_NETDEV_DEV(dev, &cgdev->dev);
2998 if (register_netdev(dev) != 0) {
2999 claw_free_netdevice(dev, 1);
3000 CLAW_DBF_TEXT(2, trace, "regfail");
3001 goto out;
3003 dev->flags &=~IFF_RUNNING;
3004 if (privptr->buffs_alloc == 0) {
3005 ret=init_ccw_bk(dev);
3006 if (ret !=0) {
3007 unregister_netdev(dev);
3008 claw_free_netdevice(dev,1);
3009 CLAW_DBF_TEXT(2, trace, "ccwmem");
3010 goto out;
3013 privptr->channel[READ_CHANNEL].ndev = dev;
3014 privptr->channel[WRITE_CHANNEL].ndev = dev;
3015 privptr->p_env->ndev = dev;
3017 dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
3018 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
3019 dev->name, p_env->read_size,
3020 p_env->write_size, p_env->read_buffers,
3021 p_env->write_buffers, p_env->devno[READ_CHANNEL],
3022 p_env->devno[WRITE_CHANNEL]);
3023 dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
3024 ":%.8s api_type: %.8s\n",
3025 dev->name, p_env->host_name,
3026 p_env->adapter_name , p_env->api_type);
3027 return 0;
3028 out:
3029 ccw_device_set_offline(cgdev->cdev[1]);
3030 ccw_device_set_offline(cgdev->cdev[0]);
3031 return -ENODEV;
3034 static void
3035 claw_purge_skb_queue(struct sk_buff_head *q)
3037 struct sk_buff *skb;
3039 CLAW_DBF_TEXT(4, trace, "purgque");
3040 while ((skb = skb_dequeue(q))) {
3041 atomic_dec(&skb->users);
3042 dev_kfree_skb_any(skb);
3047 /* Shutdown an interface.
3049 * @param cgdev Device to be shut down.
3051 * @returns 0 on success, !0 on failure. */
3053 static int
3054 claw_shutdown_device(struct ccwgroup_device *cgdev)
3056 struct claw_privbk *priv;
3057 struct net_device *ndev;
3058 int ret = 0;
3060 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3061 priv = dev_get_drvdata(&cgdev->dev);
3062 if (!priv)
3063 return -ENODEV;
3064 ndev = priv->channel[READ_CHANNEL].ndev;
3065 if (ndev) {
3066 /* Close the device */
3067 dev_info(&cgdev->dev, "%s: shutting down\n",
3068 ndev->name);
3069 if (ndev->flags & IFF_RUNNING)
3070 ret = claw_release(ndev);
3071 ndev->flags &=~IFF_RUNNING;
3072 unregister_netdev(ndev);
3073 ndev->ml_priv = NULL; /* ml_priv belongs to the cgdev, not the ndev */
3074 claw_free_netdevice(ndev, 1);
3075 priv->channel[READ_CHANNEL].ndev = NULL;
3076 priv->channel[WRITE_CHANNEL].ndev = NULL;
3077 priv->p_env->ndev = NULL;
3079 ccw_device_set_offline(cgdev->cdev[1]);
3080 ccw_device_set_offline(cgdev->cdev[0]);
3081 return ret;
3084 static void
3085 claw_remove_device(struct ccwgroup_device *cgdev)
3087 struct claw_privbk *priv;
3089 BUG_ON(!cgdev);
3090 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3091 priv = dev_get_drvdata(&cgdev->dev);
3092 BUG_ON(!priv);
3093 dev_info(&cgdev->dev, " will be removed.\n");
3094 if (cgdev->state == CCWGROUP_ONLINE)
3095 claw_shutdown_device(cgdev);
3096 claw_remove_files(&cgdev->dev);
3097 kfree(priv->p_mtc_envelope);
3098 priv->p_mtc_envelope=NULL;
3099 kfree(priv->p_env);
3100 priv->p_env=NULL;
3101 kfree(priv->channel[0].irb);
3102 priv->channel[0].irb=NULL;
3103 kfree(priv->channel[1].irb);
3104 priv->channel[1].irb=NULL;
3105 kfree(priv);
3106 dev_set_drvdata(&cgdev->dev, NULL);
3107 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
3108 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
3109 put_device(&cgdev->dev);
3111 return;
3116 /* sysfs attributes */
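/* Per-device sysfs attributes: host_name, adapter_name, api_type,
 * read_buffer and write_buffer, matching the group parameters described in
 * the header comment of this file. */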
3118 static ssize_t
3119 claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3121 struct claw_privbk *priv;
3122 struct claw_env * p_env;
3124 priv = dev_get_drvdata(dev);
3125 if (!priv)
3126 return -ENODEV;
3127 p_env = priv->p_env;
3128 return sprintf(buf, "%s\n",p_env->host_name);
3131 static ssize_t
3132 claw_hname_write(struct device *dev, struct device_attribute *attr,
3133 const char *buf, size_t count)
3135 struct claw_privbk *priv;
3136 struct claw_env * p_env;
3138 priv = dev_get_drvdata(dev);
3139 if (!priv)
3140 return -ENODEV;
3141 p_env = priv->p_env;
3142 if (count > MAX_NAME_LEN+1)
3143 return -EINVAL;
3144 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
3145 strncpy(p_env->host_name,buf, count);
3146 p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
3147 p_env->host_name[MAX_NAME_LEN] = 0x00;
3148 CLAW_DBF_TEXT(2, setup, "HstnSet");
3149 CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
3151 return count;
3154 static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
3156 static ssize_t
3157 claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3159 struct claw_privbk *priv;
3160 struct claw_env * p_env;
3162 priv = dev_get_drvdata(dev);
3163 if (!priv)
3164 return -ENODEV;
3165 p_env = priv->p_env;
3166 return sprintf(buf, "%s\n", p_env->adapter_name);
3169 static ssize_t
3170 claw_adname_write(struct device *dev, struct device_attribute *attr,
3171 const char *buf, size_t count)
3173 struct claw_privbk *priv;
3174 struct claw_env * p_env;
3176 priv = dev_get_drvdata(dev);
3177 if (!priv)
3178 return -ENODEV;
3179 p_env = priv->p_env;
3180 if (count > MAX_NAME_LEN+1)
3181 return -EINVAL;
3182 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
3183 strncpy(p_env->adapter_name,buf, count);
3184 p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
3185 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
3186 CLAW_DBF_TEXT(2, setup, "AdnSet");
3187 CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
3189 return count;
3192 static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
3194 static ssize_t
3195 claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3197 struct claw_privbk *priv;
3198 struct claw_env * p_env;
3200 priv = dev_get_drvdata(dev);
3201 if (!priv)
3202 return -ENODEV;
3203 p_env = priv->p_env;
3204 return sprintf(buf, "%s\n",
3205 p_env->api_type);
3208 static ssize_t
3209 claw_apname_write(struct device *dev, struct device_attribute *attr,
3210 const char *buf, size_t count)
3212 struct claw_privbk *priv;
3213 struct claw_env * p_env;
3215 priv = dev_get_drvdata(dev);
3216 if (!priv)
3217 return -ENODEV;
3218 p_env = priv->p_env;
3219 if (count > MAX_NAME_LEN+1)
3220 return -EINVAL;
3221 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
3222 strncpy(p_env->api_type,buf, count);
3223 p_env->api_type[count-1] = 0x20; /* we get a loose 0x0a */
3224 p_env->api_type[MAX_NAME_LEN] = 0x00;
3225 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
3226 p_env->read_size=DEF_PACK_BUFSIZE;
3227 p_env->write_size=DEF_PACK_BUFSIZE;
3228 p_env->packing=PACKING_ASK;
3229 CLAW_DBF_TEXT(2, setup, "PACKING");
3231 else {
3232 p_env->packing=0;
3233 p_env->read_size=CLAW_FRAME_SIZE;
3234 p_env->write_size=CLAW_FRAME_SIZE;
3235 CLAW_DBF_TEXT(2, setup, "ApiSet");
3237 CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
3238 return count;
3241 static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
3243 static ssize_t
3244 claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3246 struct claw_privbk *priv;
3247 struct claw_env * p_env;
3249 priv = dev_get_drvdata(dev);
3250 if (!priv)
3251 return -ENODEV;
3252 p_env = priv->p_env;
3253 return sprintf(buf, "%d\n", p_env->write_buffers);
3256 static ssize_t
3257 claw_wbuff_write(struct device *dev, struct device_attribute *attr,
3258 const char *buf, size_t count)
3260 struct claw_privbk *priv;
3261 struct claw_env * p_env;
3262 int nnn,max;
3264 priv = dev_get_drvdata(dev);
3265 if (!priv)
3266 return -ENODEV;
3267 p_env = priv->p_env;
3268 sscanf(buf, "%i", &nnn);
3269 if (p_env->packing) {
3270 max = 64;
3272 else {
3273 max = 512;
3275 if ((nnn > max ) || (nnn < 2))
3276 return -EINVAL;
3277 p_env->write_buffers = nnn;
3278 CLAW_DBF_TEXT(2, setup, "Wbufset");
3279 CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
3280 return count;
3283 static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
3285 static ssize_t
3286 claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3288 struct claw_privbk *priv;
3289 struct claw_env * p_env;
3291 priv = dev_get_drvdata(dev);
3292 if (!priv)
3293 return -ENODEV;
3294 p_env = priv->p_env;
3295 return sprintf(buf, "%d\n", p_env->read_buffers);
3298 static ssize_t
3299 claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3300 const char *buf, size_t count)
3302 struct claw_privbk *priv;
3303 struct claw_env *p_env;
3304 int nnn,max;
3306 priv = dev_get_drvdata(dev);
3307 if (!priv)
3308 return -ENODEV;
3309 p_env = priv->p_env;
3310 sscanf(buf, "%i", &nnn);
3311 if (p_env->packing) {
3312 max = 64;
3314 else {
3315 max = 512;
3317 if ((nnn > max ) || (nnn < 2))
3318 return -EINVAL;
3319 p_env->read_buffers = nnn;
3320 CLAW_DBF_TEXT(2, setup, "Rbufset");
3321 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3322 return count;
3325 static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
3327 static struct attribute *claw_attr[] = {
3328 &dev_attr_read_buffer.attr,
3329 &dev_attr_write_buffer.attr,
3330 &dev_attr_adapter_name.attr,
3331 &dev_attr_api_type.attr,
3332 &dev_attr_host_name.attr,
3333 NULL,
3336 static struct attribute_group claw_attr_group = {
3337 .attrs = claw_attr,
3340 static int
3341 claw_add_files(struct device *dev)
3343 CLAW_DBF_TEXT(2, setup, "add_file");
3344 return sysfs_create_group(&dev->kobj, &claw_attr_group);
3347 static void
3348 claw_remove_files(struct device *dev)
3350 CLAW_DBF_TEXT(2, setup, "rem_file");
3351 sysfs_remove_group(&dev->kobj, &claw_attr_group);
3354 /*--------------------------------------------------------------------*
3355 * claw_init and cleanup *
3356 *---------------------------------------------------------------------*/
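/* Module exit: remove the driver attribute file, unregister the ccwgroup and
 * ccw drivers, drop the root device and tear down the debug areas. */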
3358 static void __exit
3359 claw_cleanup(void)
3361 driver_remove_file(&claw_group_driver.driver,
3362 &driver_attr_group);
3363 ccwgroup_driver_unregister(&claw_group_driver);
3364 ccw_driver_unregister(&claw_ccw_driver);
3365 root_device_unregister(claw_root_dev);
3366 claw_unregister_debug_facility();
3367 pr_info("Driver unloaded\n");
3372 /* Initialize module.
3373 * This is called just after the module is loaded.
3375 * @return 0 on success, !0 on error. */
3377 static int __init
3378 claw_init(void)
3380 int ret = 0;
3382 pr_info("Loading %s\n", version);
3383 ret = claw_register_debug_facility();
3384 if (ret) {
3385 pr_err("Registering with the S/390 debug feature"
3386 " failed with error code %d\n", ret);
3387 goto out_err;
3389 CLAW_DBF_TEXT(2, setup, "init_mod");
3390 claw_root_dev = root_device_register("claw");
3391 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
3392 if (ret)
3393 goto register_err;
3394 ret = ccw_driver_register(&claw_ccw_driver);
3395 if (ret)
3396 goto ccw_err;
3397 claw_group_driver.driver.groups = claw_group_attr_groups;
3398 ret = ccwgroup_driver_register(&claw_group_driver);
3399 if (ret)
3400 goto ccwgroup_err;
3401 return 0;
3403 ccwgroup_err:
3404 ccw_driver_unregister(&claw_ccw_driver);
3405 ccw_err:
3406 root_device_unregister(claw_root_dev);
3407 register_err:
3408 CLAW_DBF_TEXT(2, setup, "init_bad");
3409 claw_unregister_debug_facility();
3410 out_err:
3411 pr_err("Initializing the claw device driver failed\n");
3412 return ret;
3415 module_init(claw_init);
3416 module_exit(claw_cleanup);
3418 MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
3419 MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
3420 "Copyright 2000,2008 IBM Corporation\n");
3421 MODULE_LICENSE("GPL");