1 /*
2 * drivers/s390/net/claw.c
3 * ESCON CLAW network driver
5 * Linux for zSeries version
6 * Copyright IBM Corp. 2002, 2009
7 * Author(s) Original code written by:
8 * Kazuo Iimura <iimura@jp.ibm.com>
9 * Rewritten by
10 * Andy Richter <richtera@us.ibm.com>
11 * Marc Price <mwprice@us.ibm.com>
13 * sysfs parms:
14 * group x.x.rrrr,x.x.wwww
15 * read_buffer nnnnnnn
16 * write_buffer nnnnnn
17 * host_name aaaaaaaa
18 * adapter_name aaaaaaaa
19 * api_type aaaaaaaa
21 * eg.
22 * group 0.0.0200 0.0.0201
23 * read_buffer 25
24 * write_buffer 20
25 * host_name LINUX390
26 * adapter_name RS6K
27 * api_type TCPIP
29 * where
31 * The device id is decided by the order in which entries
32 * are added to the group: the first is claw0, the second claw1,
33 * up to CLAW_MAX_DEV
35 * rrrr - the first of 2 consecutive device addresses used for the
36 * CLAW protocol.
37 * The specified address is always used as the input (Read)
38 * channel and the next address is used as the output channel.
40 * wwww - the second of 2 consecutive device addresses used for
41 * the CLAW protocol.
42 * The specified address is always used as the output
43 * channel and the previous address is used as the input channel.
45 * read_buffer - specifies number of input buffers to allocate.
46 * write_buffer - specifies number of output buffers to allocate.
47 * host_name - host name
48 * adapter_name - adapter name
49 * api_type - API type, TCPIP or API; this is sent and expected
50 * as ws_name
52 * Note the following requirements:
53 * 1) host_name must match the configured adapter_name on the remote side
54 * 2) adapter_name must match the configured host name on the remote side
56 * Change History
57 * 1.00 Initial release shipped
58 * 1.10 Changes for Buffer allocation
59 * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower
60 * 1.25 Added Packing support
61 * 1.5
 */
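/*
 * Illustrative bring-up sequence for the parameters above. The sysfs
 * paths are assumptions (the usual ccwgroup layout); the group attribute
 * is created by this driver (see claw_driver_group_store() below), the
 * online file by the ccwgroup core:
 *
 *   echo 0.0.0200,0.0.0201 > /sys/bus/ccwgroup/drivers/claw/group
 *   echo LINUX390 > /sys/bus/ccwgroup/devices/0.0.0200/host_name
 *   echo RS6K     > /sys/bus/ccwgroup/devices/0.0.0200/adapter_name
 *   echo TCPIP    > /sys/bus/ccwgroup/devices/0.0.0200/api_type
 *   echo 25       > /sys/bus/ccwgroup/devices/0.0.0200/read_buffer
 *   echo 20       > /sys/bus/ccwgroup/devices/0.0.0200/write_buffer
 *   echo 1        > /sys/bus/ccwgroup/devices/0.0.0200/online
 */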
64 #define KMSG_COMPONENT "claw"
66 #include <asm/ccwdev.h>
67 #include <asm/ccwgroup.h>
68 #include <asm/debug.h>
69 #include <asm/idals.h>
70 #include <asm/io.h>
71 #include <linux/bitops.h>
72 #include <linux/ctype.h>
73 #include <linux/delay.h>
74 #include <linux/errno.h>
75 #include <linux/if_arp.h>
76 #include <linux/init.h>
77 #include <linux/interrupt.h>
78 #include <linux/ip.h>
79 #include <linux/kernel.h>
80 #include <linux/module.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/proc_fs.h>
84 #include <linux/sched.h>
85 #include <linux/signal.h>
86 #include <linux/skbuff.h>
87 #include <linux/slab.h>
88 #include <linux/string.h>
89 #include <linux/tcp.h>
90 #include <linux/timer.h>
91 #include <linux/types.h>
93 #include "claw.h"
96 CLAW uses the s390dbf file system; see claw_trace and claw_setup
99 static char version[] __initdata = "CLAW driver";
100 static char debug_buffer[255];
102 * Debug Facility Stuff
104 static debug_info_t *claw_dbf_setup;
105 static debug_info_t *claw_dbf_trace;
108 * CLAW Debug Facility functions
110 static void
111 claw_unregister_debug_facility(void)
113 if (claw_dbf_setup)
114 debug_unregister(claw_dbf_setup);
115 if (claw_dbf_trace)
116 debug_unregister(claw_dbf_trace);
119 static int
120 claw_register_debug_facility(void)
122 claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
123 claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
124 if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
125 claw_unregister_debug_facility();
126 return -ENOMEM;
128 debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
129 debug_set_level(claw_dbf_setup, 2);
130 debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
131 debug_set_level(claw_dbf_trace, 2);
132 return 0;
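/*
 * Background on the calls above, assuming the usual s390 debug facility
 * API from <asm/debug.h>: debug_register(name, pages_per_area, nr_areas,
 * data_size) creates the "claw_setup" and "claw_trace" logs,
 * debug_register_view() attaches the hex_ascii view (visible under
 * s390dbf in debugfs), and debug_set_level(..., 2) records only events
 * with level <= 2 until the level is raised at run time.
 */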
135 static inline void
136 claw_set_busy(struct net_device *dev)
138 ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
139 eieio();
142 static inline void
143 claw_clear_busy(struct net_device *dev)
145 clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
146 netif_wake_queue(dev);
147 eieio();
150 static inline int
151 claw_check_busy(struct net_device *dev)
153 eieio();
154 return ((struct claw_privbk *) dev->ml_priv)->tbusy;
157 static inline void
158 claw_setbit_busy(int nr,struct net_device *dev)
160 netif_stop_queue(dev);
161 set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
164 static inline void
165 claw_clearbit_busy(int nr,struct net_device *dev)
167 clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
168 netif_wake_queue(dev);
171 static inline int
172 claw_test_and_setbit_busy(int nr,struct net_device *dev)
174 netif_stop_queue(dev);
175 return test_and_set_bit(nr,
176 (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
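/*
 * The busy helpers above implement the driver's transmit flow control:
 * individual bits of privptr->tbusy (TB_TX, TB_RETRY, TB_NOBUFFER and
 * TB_STOP, used later in this file) are set while a path is busy and the
 * netif queue is stopped/woken to match. An illustrative caller pattern,
 * modelled on claw_hw_tx() below:
 *
 *	if (claw_test_and_setbit_busy(TB_TX, dev)) {
 *		skb_queue_tail(&ch->collect_queue, skb); // retry later
 *		return -EBUSY;
 *	}
 */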
180 /* Functions for the DEV methods */
182 static int claw_probe(struct ccwgroup_device *cgdev);
183 static void claw_remove_device(struct ccwgroup_device *cgdev);
184 static void claw_purge_skb_queue(struct sk_buff_head *q);
185 static int claw_new_device(struct ccwgroup_device *cgdev);
186 static int claw_shutdown_device(struct ccwgroup_device *cgdev);
187 static int claw_tx(struct sk_buff *skb, struct net_device *dev);
188 static int claw_change_mtu( struct net_device *dev, int new_mtu);
189 static int claw_open(struct net_device *dev);
190 static void claw_irq_handler(struct ccw_device *cdev,
191 unsigned long intparm, struct irb *irb);
192 static void claw_irq_tasklet ( unsigned long data );
193 static int claw_release(struct net_device *dev);
194 static void claw_write_retry ( struct chbk * p_ch );
195 static void claw_write_next ( struct chbk * p_ch );
196 static void claw_timer ( struct chbk * p_ch );
198 /* Functions */
199 static int add_claw_reads(struct net_device *dev,
200 struct ccwbk* p_first, struct ccwbk* p_last);
201 static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
202 static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
203 static int find_link(struct net_device *dev, char *host_name, char *ws_name );
204 static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
205 static int init_ccw_bk(struct net_device *dev);
206 static void probe_error( struct ccwgroup_device *cgdev);
207 static struct net_device_stats *claw_stats(struct net_device *dev);
208 static int pages_to_order_of_mag(int num_of_pages);
209 static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
210 /* sysfs Functions */
211 static ssize_t claw_hname_show(struct device *dev,
212 struct device_attribute *attr, char *buf);
213 static ssize_t claw_hname_write(struct device *dev,
214 struct device_attribute *attr,
215 const char *buf, size_t count);
216 static ssize_t claw_adname_show(struct device *dev,
217 struct device_attribute *attr, char *buf);
218 static ssize_t claw_adname_write(struct device *dev,
219 struct device_attribute *attr,
220 const char *buf, size_t count);
221 static ssize_t claw_apname_show(struct device *dev,
222 struct device_attribute *attr, char *buf);
223 static ssize_t claw_apname_write(struct device *dev,
224 struct device_attribute *attr,
225 const char *buf, size_t count);
226 static ssize_t claw_wbuff_show(struct device *dev,
227 struct device_attribute *attr, char *buf);
228 static ssize_t claw_wbuff_write(struct device *dev,
229 struct device_attribute *attr,
230 const char *buf, size_t count);
231 static ssize_t claw_rbuff_show(struct device *dev,
232 struct device_attribute *attr, char *buf);
233 static ssize_t claw_rbuff_write(struct device *dev,
234 struct device_attribute *attr,
235 const char *buf, size_t count);
236 static int claw_add_files(struct device *dev);
237 static void claw_remove_files(struct device *dev);
239 /* Functions for System Validate */
240 static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
241 static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
242 __u8 correlator, __u8 rc , char *local_name, char *remote_name);
243 static int claw_snd_conn_req(struct net_device *dev, __u8 link);
244 static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
245 static int claw_snd_sys_validate_rsp(struct net_device *dev,
246 struct clawctl * p_ctl, __u32 return_code);
247 static int claw_strt_conn_req(struct net_device *dev );
248 static void claw_strt_read(struct net_device *dev, int lock);
249 static void claw_strt_out_IO(struct net_device *dev);
250 static void claw_free_wrt_buf(struct net_device *dev);
252 /* Functions for unpack reads */
253 static void unpack_read(struct net_device *dev);
255 static int claw_pm_prepare(struct ccwgroup_device *gdev)
257 return -EPERM;
260 /* the root device for claw group devices */
261 static struct device *claw_root_dev;
263 /* ccwgroup table */
265 static struct ccwgroup_driver claw_group_driver = {
266 .owner = THIS_MODULE,
267 .name = "claw",
268 .max_slaves = 2,
269 .driver_id = 0xC3D3C1E6,
270 .probe = claw_probe,
271 .remove = claw_remove_device,
272 .set_online = claw_new_device,
273 .set_offline = claw_shutdown_device,
274 .prepare = claw_pm_prepare,
277 static struct ccw_device_id claw_ids[] = {
278 {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
281 MODULE_DEVICE_TABLE(ccw, claw_ids);
283 static struct ccw_driver claw_ccw_driver = {
284 .owner = THIS_MODULE,
285 .name = "claw",
286 .ids = claw_ids,
287 .probe = ccwgroup_probe_ccwdev,
288 .remove = ccwgroup_remove_ccwdev,
291 static ssize_t
292 claw_driver_group_store(struct device_driver *ddrv, const char *buf,
293 size_t count)
295 int err;
296 err = ccwgroup_create_from_string(claw_root_dev,
297 claw_group_driver.driver_id,
298 &claw_ccw_driver, 2, buf);
299 return err ? err : count;
302 static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
304 static struct attribute *claw_group_attrs[] = {
305 &driver_attr_group.attr,
306 NULL,
309 static struct attribute_group claw_group_attr_group = {
310 .attrs = claw_group_attrs,
313 static const struct attribute_group *claw_group_attr_groups[] = {
314 &claw_group_attr_group,
315 NULL,
319 * Key functions
322 /*----------------------------------------------------------------*
323 * claw_probe *
324 * this function is called for each CLAW device. *
325 *----------------------------------------------------------------*/
326 static int
327 claw_probe(struct ccwgroup_device *cgdev)
329 int rc;
330 struct claw_privbk *privptr=NULL;
332 CLAW_DBF_TEXT(2, setup, "probe");
333 if (!get_device(&cgdev->dev))
334 return -ENODEV;
335 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
336 dev_set_drvdata(&cgdev->dev, privptr);
337 if (privptr == NULL) {
338 probe_error(cgdev);
339 put_device(&cgdev->dev);
340 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
341 return -ENOMEM;
343 privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
344 privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
345 if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
346 probe_error(cgdev);
347 put_device(&cgdev->dev);
348 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
349 return -ENOMEM;
351 memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
352 memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
353 memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
354 privptr->p_env->packing = 0;
355 privptr->p_env->write_buffers = 5;
356 privptr->p_env->read_buffers = 5;
357 privptr->p_env->read_size = CLAW_FRAME_SIZE;
358 privptr->p_env->write_size = CLAW_FRAME_SIZE;
359 rc = claw_add_files(&cgdev->dev);
360 if (rc) {
361 probe_error(cgdev);
362 put_device(&cgdev->dev);
363 dev_err(&cgdev->dev, "Creating the /proc files for a new"
364 " CLAW device failed\n");
365 CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
366 return rc;
368 privptr->p_env->p_priv = privptr;
369 cgdev->cdev[0]->handler = claw_irq_handler;
370 cgdev->cdev[1]->handler = claw_irq_handler;
371 CLAW_DBF_TEXT(2, setup, "prbext 0");
373 return 0;
374 } /* end of claw_probe */
376 /*-------------------------------------------------------------------*
377 * claw_tx *
378 *-------------------------------------------------------------------*/
380 static int
381 claw_tx(struct sk_buff *skb, struct net_device *dev)
383 int rc;
384 struct claw_privbk *privptr = dev->ml_priv;
385 unsigned long saveflags;
386 struct chbk *p_ch;
388 CLAW_DBF_TEXT(4, trace, "claw_tx");
389 p_ch = &privptr->channel[WRITE_CHANNEL];
390 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
391 rc=claw_hw_tx( skb, dev, 1 );
392 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
393 CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
394 if (rc)
395 rc = NETDEV_TX_BUSY;
396 else
397 rc = NETDEV_TX_OK;
398 return rc;
399 } /* end of claw_tx */
401 /*------------------------------------------------------------------*
402 * pack the collect queue into an skb and return it *
403 * If not packing just return the top skb from the queue *
404 *------------------------------------------------------------------*/
406 static struct sk_buff *
407 claw_pack_skb(struct claw_privbk *privptr)
409 struct sk_buff *new_skb,*held_skb;
410 struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
411 struct claw_env *p_env = privptr->p_env;
412 int pkt_cnt,pk_ind,so_far;
414 new_skb = NULL; /* assume no dice */
415 pkt_cnt = 0;
416 CLAW_DBF_TEXT(4, trace, "PackSKBe");
417 if (!skb_queue_empty(&p_ch->collect_queue)) {
418 /* some data */
419 held_skb = skb_dequeue(&p_ch->collect_queue);
420 if (held_skb)
421 dev_kfree_skb_any(held_skb);
422 else
423 return NULL;
424 if (p_env->packing != DO_PACKED)
425 return held_skb;
426 /* get a new SKB we will pack at least one */
427 new_skb = dev_alloc_skb(p_env->write_size);
428 if (new_skb == NULL) {
429 atomic_inc(&held_skb->users);
430 skb_queue_head(&p_ch->collect_queue,held_skb);
431 return NULL;
433 /* we have a packet to pack and a place to put it */
434 pk_ind = 1;
435 so_far = 0;
436 new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
437 while ((pk_ind) && (held_skb != NULL)) {
438 if (held_skb->len+so_far <= p_env->write_size-8) {
439 memcpy(skb_put(new_skb,held_skb->len),
440 held_skb->data,held_skb->len);
441 privptr->stats.tx_packets++;
442 so_far += held_skb->len;
443 pkt_cnt++;
444 dev_kfree_skb_any(held_skb);
445 held_skb = skb_dequeue(&p_ch->collect_queue);
446 if (held_skb)
447 atomic_dec(&held_skb->users);
448 } else {
449 pk_ind = 0;
450 atomic_inc(&held_skb->users);
451 skb_queue_head(&p_ch->collect_queue,held_skb);
455 CLAW_DBF_TEXT(4, trace, "PackSKBx");
456 return new_skb;
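/*
 * Packing note for claw_pack_skb(): when the peer negotiated DO_PACKED,
 * queued skbs are concatenated into one write buffer as long as they fit
 * within p_env->write_size minus an 8-byte reserve; each queued skb
 * already carries its own packing header (that is what the cb[1] == 'P'
 * marker records), so several small packets can go out in a single
 * channel program. The 32k packing buffer size mentioned in
 * init_ccw_bk() is assumed here for the "several packets" claim.
 */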
459 /*-------------------------------------------------------------------*
460 * claw_change_mtu *
462 *-------------------------------------------------------------------*/
464 static int
465 claw_change_mtu(struct net_device *dev, int new_mtu)
467 struct claw_privbk *privptr = dev->ml_priv;
468 int buff_size;
469 CLAW_DBF_TEXT(4, trace, "setmtu");
470 buff_size = privptr->p_env->write_size;
471 if ((new_mtu < 60) || (new_mtu > buff_size)) {
472 return -EINVAL;
474 dev->mtu = new_mtu;
475 return 0;
476 } /* end of claw_change_mtu */
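/*
 * MTU note: claw_change_mtu() accepts 60 bytes up to the current write
 * buffer size, so the ceiling is the CLAW frame size when unpacked and
 * the packing buffer size when packing is negotiated (4k and 32k
 * respectively, assuming the claw.h defaults referenced in
 * init_ccw_bk()).
 */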
479 /*-------------------------------------------------------------------*
480 * claw_open *
482 *-------------------------------------------------------------------*/
483 static int
484 claw_open(struct net_device *dev)
487 int rc;
488 int i;
489 unsigned long saveflags=0;
490 unsigned long parm;
491 struct claw_privbk *privptr;
492 DECLARE_WAITQUEUE(wait, current);
493 struct timer_list timer;
494 struct ccwbk *p_buf;
496 CLAW_DBF_TEXT(4, trace, "open");
497 privptr = (struct claw_privbk *)dev->ml_priv;
498 /* allocate and initialize CCW blocks */
499 if (privptr->buffs_alloc == 0) {
500 rc=init_ccw_bk(dev);
501 if (rc) {
502 CLAW_DBF_TEXT(2, trace, "openmem");
503 return -ENOMEM;
506 privptr->system_validate_comp=0;
507 privptr->release_pend=0;
508 if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
509 privptr->p_env->read_size=DEF_PACK_BUFSIZE;
510 privptr->p_env->write_size=DEF_PACK_BUFSIZE;
511 privptr->p_env->packing=PACKING_ASK;
512 } else {
513 privptr->p_env->packing=0;
514 privptr->p_env->read_size=CLAW_FRAME_SIZE;
515 privptr->p_env->write_size=CLAW_FRAME_SIZE;
517 claw_set_busy(dev);
518 tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
519 (unsigned long) &privptr->channel[READ_CHANNEL]);
520 for ( i = 0; i < 2; i++) {
521 CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
522 init_waitqueue_head(&privptr->channel[i].wait);
523 /* skb_queue_head_init(&p_ch->io_queue); */
524 if (i == WRITE_CHANNEL)
525 skb_queue_head_init(
526 &privptr->channel[WRITE_CHANNEL].collect_queue);
527 privptr->channel[i].flag_a = 0;
528 privptr->channel[i].IO_active = 0;
529 privptr->channel[i].flag &= ~CLAW_TIMER;
530 init_timer(&timer);
531 timer.function = (void *)claw_timer;
532 timer.data = (unsigned long)(&privptr->channel[i]);
533 timer.expires = jiffies + 15*HZ;
534 add_timer(&timer);
535 spin_lock_irqsave(get_ccwdev_lock(
536 privptr->channel[i].cdev), saveflags);
537 parm = (unsigned long) &privptr->channel[i];
538 privptr->channel[i].claw_state = CLAW_START_HALT_IO;
539 rc = 0;
540 add_wait_queue(&privptr->channel[i].wait, &wait);
541 rc = ccw_device_halt(
542 (struct ccw_device *)privptr->channel[i].cdev,parm);
543 set_current_state(TASK_INTERRUPTIBLE);
544 spin_unlock_irqrestore(
545 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
546 schedule();
547 set_current_state(TASK_RUNNING);
548 remove_wait_queue(&privptr->channel[i].wait, &wait);
549 if(rc != 0)
550 ccw_check_return_code(privptr->channel[i].cdev, rc);
551 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
552 del_timer(&timer);
554 if ((((privptr->channel[READ_CHANNEL].last_dstat |
555 privptr->channel[WRITE_CHANNEL].last_dstat) &
556 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
557 (((privptr->channel[READ_CHANNEL].flag |
558 privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
559 dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
560 "%s: remote side is not ready\n", dev->name);
561 CLAW_DBF_TEXT(2, trace, "notrdy");
563 for ( i = 0; i < 2; i++) {
564 spin_lock_irqsave(
565 get_ccwdev_lock(privptr->channel[i].cdev),
566 saveflags);
567 parm = (unsigned long) &privptr->channel[i];
568 privptr->channel[i].claw_state = CLAW_STOP;
569 rc = ccw_device_halt(
570 (struct ccw_device *)privptr->channel[i].cdev,
571 parm);
572 spin_unlock_irqrestore(
573 get_ccwdev_lock(privptr->channel[i].cdev),
574 saveflags);
575 if (rc != 0) {
576 ccw_check_return_code(
577 privptr->channel[i].cdev, rc);
580 free_pages((unsigned long)privptr->p_buff_ccw,
581 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
582 if (privptr->p_env->read_size < PAGE_SIZE) {
583 free_pages((unsigned long)privptr->p_buff_read,
584 (int)pages_to_order_of_mag(
585 privptr->p_buff_read_num));
587 else {
588 p_buf=privptr->p_read_active_first;
589 while (p_buf!=NULL) {
590 free_pages((unsigned long)p_buf->p_buffer,
591 (int)pages_to_order_of_mag(
592 privptr->p_buff_pages_perread ));
593 p_buf=p_buf->next;
596 if (privptr->p_env->write_size < PAGE_SIZE ) {
597 free_pages((unsigned long)privptr->p_buff_write,
598 (int)pages_to_order_of_mag(
599 privptr->p_buff_write_num));
601 else {
602 p_buf=privptr->p_write_active_first;
603 while (p_buf!=NULL) {
604 free_pages((unsigned long)p_buf->p_buffer,
605 (int)pages_to_order_of_mag(
606 privptr->p_buff_pages_perwrite ));
607 p_buf=p_buf->next;
610 privptr->buffs_alloc = 0;
611 privptr->channel[READ_CHANNEL].flag = 0x00;
612 privptr->channel[WRITE_CHANNEL].flag = 0x00;
613 privptr->p_buff_ccw=NULL;
614 privptr->p_buff_read=NULL;
615 privptr->p_buff_write=NULL;
616 claw_clear_busy(dev);
617 CLAW_DBF_TEXT(2, trace, "open EIO");
618 return -EIO;
621 /* Send SystemValidate command */
623 claw_clear_busy(dev);
624 CLAW_DBF_TEXT(4, trace, "openok");
625 return 0;
626 } /* end of claw_open */
628 /*-------------------------------------------------------------------*
630 * claw_irq_handler *
632 *--------------------------------------------------------------------*/
633 static void
634 claw_irq_handler(struct ccw_device *cdev,
635 unsigned long intparm, struct irb *irb)
637 struct chbk *p_ch = NULL;
638 struct claw_privbk *privptr = NULL;
639 struct net_device *dev = NULL;
640 struct claw_env *p_env;
641 struct chbk *p_ch_r=NULL;
643 CLAW_DBF_TEXT(4, trace, "clawirq");
644 /* Bypass all 'unsolicited interrupts' */
645 privptr = dev_get_drvdata(&cdev->dev);
646 if (!privptr) {
647 dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
648 " IRQ, c-%02x d-%02x\n",
649 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
650 CLAW_DBF_TEXT(2, trace, "badirq");
651 return;
654 /* Try to extract channel from driver data. */
655 if (privptr->channel[READ_CHANNEL].cdev == cdev)
656 p_ch = &privptr->channel[READ_CHANNEL];
657 else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
658 p_ch = &privptr->channel[WRITE_CHANNEL];
659 else {
660 dev_warn(&cdev->dev, "The device is not a CLAW device\n");
661 CLAW_DBF_TEXT(2, trace, "badchan");
662 return;
664 CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);
666 dev = (struct net_device *) (p_ch->ndev);
667 p_env=privptr->p_env;
669 /* Copy interruption response block. */
670 memcpy(p_ch->irb, irb, sizeof(struct irb));
672 /* Check for good subchannel return code, otherwise info message */
673 if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
674 dev_info(&cdev->dev,
675 "%s: subchannel check for device: %04x -"
676 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
677 dev->name, p_ch->devno,
678 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
679 irb->scsw.cmd.cpa);
680 CLAW_DBF_TEXT(2, trace, "chanchk");
681 /* return; */
684 /* Check the reason-code of a unit check */
685 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
686 ccw_check_unit_check(p_ch, irb->ecw[0]);
688 /* State machine to bring the connection up, down and to restart */
689 p_ch->last_dstat = irb->scsw.cmd.dstat;
691 switch (p_ch->claw_state) {
692 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
693 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
694 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
695 (p_ch->irb->scsw.cmd.stctl ==
696 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
697 return;
698 wake_up(&p_ch->wait); /* wake up claw_release */
699 CLAW_DBF_TEXT(4, trace, "stop");
700 return;
701 case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
702 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
703 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
704 (p_ch->irb->scsw.cmd.stctl ==
705 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
706 CLAW_DBF_TEXT(4, trace, "haltio");
707 return;
709 if (p_ch->flag == CLAW_READ) {
710 p_ch->claw_state = CLAW_START_READ;
711 wake_up(&p_ch->wait); /* wake claw_open (READ)*/
712 } else if (p_ch->flag == CLAW_WRITE) {
713 p_ch->claw_state = CLAW_START_WRITE;
714 /* send SYSTEM_VALIDATE */
715 claw_strt_read(dev, LOCK_NO);
716 claw_send_control(dev,
717 SYSTEM_VALIDATE_REQUEST,
718 0, 0, 0,
719 p_env->host_name,
720 p_env->adapter_name);
721 } else {
722 dev_warn(&cdev->dev, "The CLAW device received"
723 " an unexpected IRQ, "
724 "c-%02x d-%02x\n",
725 irb->scsw.cmd.cstat,
726 irb->scsw.cmd.dstat);
727 return;
729 CLAW_DBF_TEXT(4, trace, "haltio");
730 return;
731 case CLAW_START_READ:
732 CLAW_DBF_TEXT(4, trace, "ReadIRQ");
733 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
734 clear_bit(0, (void *)&p_ch->IO_active);
735 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
736 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
737 (p_ch->irb->ecw[0]) == 0) {
738 privptr->stats.rx_errors++;
739 dev_info(&cdev->dev,
740 "%s: Restart is required after remote "
741 "side recovers \n",
742 dev->name);
744 CLAW_DBF_TEXT(4, trace, "notrdy");
745 return;
747 if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
748 (p_ch->irb->scsw.cmd.dstat == 0)) {
749 if (test_and_set_bit(CLAW_BH_ACTIVE,
750 (void *)&p_ch->flag_a) == 0)
751 tasklet_schedule(&p_ch->tasklet);
752 else
753 CLAW_DBF_TEXT(4, trace, "PCINoBH");
754 CLAW_DBF_TEXT(4, trace, "PCI_read");
755 return;
757 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
758 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
759 (p_ch->irb->scsw.cmd.stctl ==
760 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
761 CLAW_DBF_TEXT(4, trace, "SPend_rd");
762 return;
764 clear_bit(0, (void *)&p_ch->IO_active);
765 claw_clearbit_busy(TB_RETRY, dev);
766 if (test_and_set_bit(CLAW_BH_ACTIVE,
767 (void *)&p_ch->flag_a) == 0)
768 tasklet_schedule(&p_ch->tasklet);
769 else
770 CLAW_DBF_TEXT(4, trace, "RdBHAct");
771 CLAW_DBF_TEXT(4, trace, "RdIRQXit");
772 return;
773 case CLAW_START_WRITE:
774 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
775 dev_info(&cdev->dev,
776 "%s: Unit Check Occured in "
777 "write channel\n", dev->name);
778 clear_bit(0, (void *)&p_ch->IO_active);
779 if (p_ch->irb->ecw[0] & 0x80) {
780 dev_info(&cdev->dev,
781 "%s: Resetting Event "
782 "occurred:\n", dev->name);
783 init_timer(&p_ch->timer);
784 p_ch->timer.function =
785 (void *)claw_write_retry;
786 p_ch->timer.data = (unsigned long)p_ch;
787 p_ch->timer.expires = jiffies + 10*HZ;
788 add_timer(&p_ch->timer);
789 dev_info(&cdev->dev,
790 "%s: write connection "
791 "restarting\n", dev->name);
793 CLAW_DBF_TEXT(4, trace, "rstrtwrt");
794 return;
796 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
797 clear_bit(0, (void *)&p_ch->IO_active);
798 dev_info(&cdev->dev,
799 "%s: Unit Exception "
800 "occurred in write channel\n",
801 dev->name);
803 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
804 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
805 (p_ch->irb->scsw.cmd.stctl ==
806 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
807 CLAW_DBF_TEXT(4, trace, "writeUE");
808 return;
810 clear_bit(0, (void *)&p_ch->IO_active);
811 if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
812 claw_write_next(p_ch);
813 claw_clearbit_busy(TB_TX, dev);
814 claw_clear_busy(dev);
816 p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
817 if (test_and_set_bit(CLAW_BH_ACTIVE,
818 (void *)&p_ch_r->flag_a) == 0)
819 tasklet_schedule(&p_ch_r->tasklet);
820 CLAW_DBF_TEXT(4, trace, "StWtExit");
821 return;
822 default:
823 dev_warn(&cdev->dev,
824 "The CLAW device for %s received an unexpected IRQ\n",
825 dev->name);
826 CLAW_DBF_TEXT(2, trace, "badIRQ");
827 return;
830 } /* end of claw_irq_handler */
833 /*-------------------------------------------------------------------*
834 * claw_irq_tasklet *
836 *--------------------------------------------------------------------*/
837 static void
838 claw_irq_tasklet ( unsigned long data )
840 struct chbk * p_ch;
841 struct net_device *dev;
842 struct claw_privbk * privptr;
844 p_ch = (struct chbk *) data;
845 dev = (struct net_device *)p_ch->ndev;
846 CLAW_DBF_TEXT(4, trace, "IRQtask");
847 privptr = (struct claw_privbk *)dev->ml_priv;
848 unpack_read(dev);
849 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
850 CLAW_DBF_TEXT(4, trace, "TskletXt");
851 return;
852 } /* end of claw_irq_bh */
854 /*-------------------------------------------------------------------*
855 * claw_release *
857 *--------------------------------------------------------------------*/
858 static int
859 claw_release(struct net_device *dev)
861 int rc;
862 int i;
863 unsigned long saveflags;
864 unsigned long parm;
865 struct claw_privbk *privptr;
866 DECLARE_WAITQUEUE(wait, current);
867 struct ccwbk* p_this_ccw;
868 struct ccwbk* p_buf;
870 if (!dev)
871 return 0;
872 privptr = (struct claw_privbk *)dev->ml_priv;
873 if (!privptr)
874 return 0;
875 CLAW_DBF_TEXT(4, trace, "release");
876 privptr->release_pend=1;
877 claw_setbit_busy(TB_STOP,dev);
878 for ( i = 1; i >=0 ; i--) {
879 spin_lock_irqsave(
880 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
881 /* del_timer(&privptr->channel[READ_CHANNEL].timer); */
882 privptr->channel[i].claw_state = CLAW_STOP;
883 privptr->channel[i].IO_active = 0;
884 parm = (unsigned long) &privptr->channel[i];
885 if (i == WRITE_CHANNEL)
886 claw_purge_skb_queue(
887 &privptr->channel[WRITE_CHANNEL].collect_queue);
888 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
889 if (privptr->system_validate_comp==0x00) /* never opened? */
890 init_waitqueue_head(&privptr->channel[i].wait);
891 add_wait_queue(&privptr->channel[i].wait, &wait);
892 set_current_state(TASK_INTERRUPTIBLE);
893 spin_unlock_irqrestore(
894 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
895 schedule();
896 set_current_state(TASK_RUNNING);
897 remove_wait_queue(&privptr->channel[i].wait, &wait);
898 if (rc != 0) {
899 ccw_check_return_code(privptr->channel[i].cdev, rc);
902 if (privptr->pk_skb != NULL) {
903 dev_kfree_skb_any(privptr->pk_skb);
904 privptr->pk_skb = NULL;
906 if(privptr->buffs_alloc != 1) {
907 CLAW_DBF_TEXT(4, trace, "none2fre");
908 return 0;
910 CLAW_DBF_TEXT(4, trace, "freebufs");
911 if (privptr->p_buff_ccw != NULL) {
912 free_pages((unsigned long)privptr->p_buff_ccw,
913 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
915 CLAW_DBF_TEXT(4, trace, "freeread");
916 if (privptr->p_env->read_size < PAGE_SIZE) {
917 if (privptr->p_buff_read != NULL) {
918 free_pages((unsigned long)privptr->p_buff_read,
919 (int)pages_to_order_of_mag(privptr->p_buff_read_num));
922 else {
923 p_buf=privptr->p_read_active_first;
924 while (p_buf!=NULL) {
925 free_pages((unsigned long)p_buf->p_buffer,
926 (int)pages_to_order_of_mag(
927 privptr->p_buff_pages_perread ));
928 p_buf=p_buf->next;
931 CLAW_DBF_TEXT(4, trace, "freewrit");
932 if (privptr->p_env->write_size < PAGE_SIZE ) {
933 free_pages((unsigned long)privptr->p_buff_write,
934 (int)pages_to_order_of_mag(privptr->p_buff_write_num));
936 else {
937 p_buf=privptr->p_write_active_first;
938 while (p_buf!=NULL) {
939 free_pages((unsigned long)p_buf->p_buffer,
940 (int)pages_to_order_of_mag(
941 privptr->p_buff_pages_perwrite ));
942 p_buf=p_buf->next;
945 CLAW_DBF_TEXT(4, trace, "clearptr");
946 privptr->buffs_alloc = 0;
947 privptr->p_buff_ccw=NULL;
948 privptr->p_buff_read=NULL;
949 privptr->p_buff_write=NULL;
950 privptr->system_validate_comp=0;
951 privptr->release_pend=0;
952 /* Remove any writes that were pending and reset all reads */
953 p_this_ccw=privptr->p_read_active_first;
954 while (p_this_ccw!=NULL) {
955 p_this_ccw->header.length=0xffff;
956 p_this_ccw->header.opcode=0xff;
957 p_this_ccw->header.flag=0x00;
958 p_this_ccw=p_this_ccw->next;
961 while (privptr->p_write_active_first!=NULL) {
962 p_this_ccw=privptr->p_write_active_first;
963 p_this_ccw->header.flag=CLAW_PENDING;
964 privptr->p_write_active_first=p_this_ccw->next;
965 p_this_ccw->next=privptr->p_write_free_chain;
966 privptr->p_write_free_chain=p_this_ccw;
967 ++privptr->write_free_count;
969 privptr->p_write_active_last=NULL;
970 privptr->mtc_logical_link = -1;
971 privptr->mtc_skipping = 1;
972 privptr->mtc_offset=0;
974 if (((privptr->channel[READ_CHANNEL].last_dstat |
975 privptr->channel[WRITE_CHANNEL].last_dstat) &
976 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
977 dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
978 "Deactivating %s completed with incorrect"
979 " subchannel status "
980 "(read %02x, write %02x)\n",
981 dev->name,
982 privptr->channel[READ_CHANNEL].last_dstat,
983 privptr->channel[WRITE_CHANNEL].last_dstat);
984 CLAW_DBF_TEXT(2, trace, "badclose");
986 CLAW_DBF_TEXT(4, trace, "rlsexit");
987 return 0;
988 } /* end of claw_release */
990 /*-------------------------------------------------------------------*
991 * claw_write_retry *
993 *--------------------------------------------------------------------*/
995 static void
996 claw_write_retry ( struct chbk *p_ch )
999 struct net_device *dev=p_ch->ndev;
1001 CLAW_DBF_TEXT(4, trace, "w_retry");
1002 if (p_ch->claw_state == CLAW_STOP) {
1003 return;
1005 claw_strt_out_IO( dev );
1006 CLAW_DBF_TEXT(4, trace, "rtry_xit");
1007 return;
1008 } /* end of claw_write_retry */
1011 /*-------------------------------------------------------------------*
1012 * claw_write_next *
1014 *--------------------------------------------------------------------*/
1016 static void
1017 claw_write_next ( struct chbk * p_ch )
1020 struct net_device *dev;
1021 struct claw_privbk *privptr=NULL;
1022 struct sk_buff *pk_skb;
1023 int rc;
1025 CLAW_DBF_TEXT(4, trace, "claw_wrt");
1026 if (p_ch->claw_state == CLAW_STOP)
1027 return;
1028 dev = (struct net_device *) p_ch->ndev;
1029 privptr = (struct claw_privbk *) dev->ml_priv;
1030 claw_free_wrt_buf( dev );
1031 if ((privptr->write_free_count > 0) &&
1032 !skb_queue_empty(&p_ch->collect_queue)) {
1033 pk_skb = claw_pack_skb(privptr);
1034 while (pk_skb != NULL) {
1035 rc = claw_hw_tx( pk_skb, dev,1);
1036 if (privptr->write_free_count > 0) {
1037 pk_skb = claw_pack_skb(privptr);
1038 } else
1039 pk_skb = NULL;
1042 if (privptr->p_write_active_first!=NULL) {
1043 claw_strt_out_IO(dev);
1045 return;
1046 } /* end of claw_write_next */
1048 /*-------------------------------------------------------------------*
1050 * claw_timer *
1051 *--------------------------------------------------------------------*/
1053 static void
1054 claw_timer ( struct chbk * p_ch )
1056 CLAW_DBF_TEXT(4, trace, "timer");
1057 p_ch->flag |= CLAW_TIMER;
1058 wake_up(&p_ch->wait);
1059 return;
1060 } /* end of claw_timer */
1064 * functions
1068 /*-------------------------------------------------------------------*
1070 * pages_to_order_of_mag *
1072 * takes a number of pages from 1 to 512 and returns the *
1073 * ceiling of log2(num_pages); get_free_pages() needs a *
1074 * base-2 order of magnitude and has an upper order of 9 *
1075 *--------------------------------------------------------------------*/
1077 static int
1078 pages_to_order_of_mag(int num_of_pages)
1080 int order_of_mag=1; /* assume 2 pages */
1081 int nump;
1083 CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
1084 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
1085 /* 512 pages = 2Meg on 4k page systems */
1086 if (num_of_pages >= 512) {return 9; }
1087 /* we have two or more pages order is at least 1 */
1088 for (nump=2 ;nump <= 512;nump*=2) {
1089 if (num_of_pages <= nump)
1090 break;
1091 order_of_mag +=1;
1093 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
1094 CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
1095 return order_of_mag;
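/*
 * Worked examples for pages_to_order_of_mag(): 1 page -> order 0,
 * 2 pages -> order 1, 5 pages -> order 3 (rounded up to 8 pages),
 * 512 or more pages -> order 9 (2 MB with 4 KB pages). This is a
 * bounded ceil(log2(n)), roughly get_order(num_of_pages << PAGE_SHIFT)
 * capped at 9.
 */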
1098 /*-------------------------------------------------------------------*
1100 * add_claw_reads *
1102 *--------------------------------------------------------------------*/
1103 static int
1104 add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1105 struct ccwbk* p_last)
1107 struct claw_privbk *privptr;
1108 struct ccw1 temp_ccw;
1109 struct endccw * p_end;
1110 CLAW_DBF_TEXT(4, trace, "addreads");
1111 privptr = dev->ml_priv;
1112 p_end = privptr->p_end_ccw;
1114 /* first CCW and last CCW contain a new set of read channel programs
1115 * to append to the running channel programs
1117 if ( p_first==NULL) {
1118 CLAW_DBF_TEXT(4, trace, "addexit");
1119 return 0;
1122 /* set up ending CCW sequence for this segment */
1123 if (p_end->read1) {
1124 p_end->read1=0x00; /* second ending CCW is now active */
1125 /* reset ending CCWs and setup TIC CCWs */
1126 p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1127 p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1128 p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
1129 p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
1130 p_end->read2_nop2.cda=0;
1131 p_end->read2_nop2.count=1;
1133 else {
1134 p_end->read1=0x01; /* first ending CCW is now active */
1135 /* reset ending CCWs and setup TIC CCWs */
1136 p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1137 p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1138 p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
1139 p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
1140 p_end->read1_nop2.cda=0;
1141 p_end->read1_nop2.count=1;
1144 if ( privptr-> p_read_active_first ==NULL ) {
1145 privptr->p_read_active_first = p_first; /* set new first */
1146 privptr->p_read_active_last = p_last; /* set new last */
1148 else {
1150 /* set up TIC ccw */
1151 temp_ccw.cda= (__u32)__pa(&p_first->read);
1152 temp_ccw.count=0;
1153 temp_ccw.flags=0;
1154 temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
1157 if (p_end->read1) {
1159 /* first set of CCW's is chained to the new read */
1160 /* chain, so the second set is chained to the active chain. */
1161 /* Therefore modify the second set to point to the new */
1162 /* read chain set up TIC CCWs */
1163 /* make sure we update the CCW so channel doesn't fetch it */
1164 /* when it's only half done */
1165 memcpy( &p_end->read2_nop2, &temp_ccw ,
1166 sizeof(struct ccw1));
1167 privptr->p_read_active_last->r_TIC_1.cda=
1168 (__u32)__pa(&p_first->read);
1169 privptr->p_read_active_last->r_TIC_2.cda=
1170 (__u32)__pa(&p_first->read);
1172 else {
1173 /* make sure we update the CCW so channel doesn't */
1174 /* fetch it when it is only half done */
1175 memcpy( &p_end->read1_nop2, &temp_ccw ,
1176 sizeof(struct ccw1));
1177 privptr->p_read_active_last->r_TIC_1.cda=
1178 (__u32)__pa(&p_first->read);
1179 privptr->p_read_active_last->r_TIC_2.cda=
1180 (__u32)__pa(&p_first->read);
1182 /* chain in new set of blocks */
1183 privptr->p_read_active_last->next = p_first;
1184 privptr->p_read_active_last=p_last;
1185 } /* end of if ( privptr-> p_read_active_first ==NULL) */
1186 CLAW_DBF_TEXT(4, trace, "addexit");
1187 return 0;
1188 } /* end of add_claw_reads */
1190 /*-------------------------------------------------------------------*
1191 * ccw_check_return_code *
1193 *-------------------------------------------------------------------*/
1195 static void
1196 ccw_check_return_code(struct ccw_device *cdev, int return_code)
1198 CLAW_DBF_TEXT(4, trace, "ccwret");
1199 if (return_code != 0) {
1200 switch (return_code) {
1201 case -EBUSY: /* BUSY is a transient state no action needed */
1202 break;
1203 case -ENODEV:
1204 dev_err(&cdev->dev, "The remote channel adapter is not"
1205 " available\n");
1206 break;
1207 case -EINVAL:
1208 dev_err(&cdev->dev,
1209 "The status of the remote channel adapter"
1210 " is not valid\n");
1211 break;
1212 default:
1213 dev_err(&cdev->dev, "The common device layer"
1214 " returned error code %d\n",
1215 return_code);
1218 CLAW_DBF_TEXT(4, trace, "ccwret");
1219 } /* end of ccw_check_return_code */
1221 /*-------------------------------------------------------------------*
1222 * ccw_check_unit_check *
1223 *--------------------------------------------------------------------*/
1225 static void
1226 ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1228 struct net_device *ndev = p_ch->ndev;
1229 struct device *dev = &p_ch->cdev->dev;
1231 CLAW_DBF_TEXT(4, trace, "unitchek");
1232 dev_warn(dev, "The communication peer of %s disconnected\n",
1233 ndev->name);
1235 if (sense & 0x40) {
1236 if (sense & 0x01) {
1237 dev_warn(dev, "The remote channel adapter for"
1238 " %s has been reset\n",
1239 ndev->name);
1241 } else if (sense & 0x20) {
1242 if (sense & 0x04) {
1243 dev_warn(dev, "A data streaming timeout occurred"
1244 " for %s\n",
1245 ndev->name);
1246 } else if (sense & 0x10) {
1247 dev_warn(dev, "The remote channel adapter for %s"
1248 " is faulty\n",
1249 ndev->name);
1250 } else {
1251 dev_warn(dev, "A data transfer parity error occurred"
1252 " for %s\n",
1253 ndev->name);
1255 } else if (sense & 0x10) {
1256 dev_warn(dev, "A read data parity error occurred"
1257 " for %s\n",
1258 ndev->name);
1261 } /* end of ccw_check_unit_check */
1263 /*-------------------------------------------------------------------*
1264 * find_link *
1265 *--------------------------------------------------------------------*/
1266 static int
1267 find_link(struct net_device *dev, char *host_name, char *ws_name )
1269 struct claw_privbk *privptr;
1270 struct claw_env *p_env;
1271 int rc=0;
1273 CLAW_DBF_TEXT(2, setup, "findlink");
1274 privptr = dev->ml_priv;
1275 p_env=privptr->p_env;
1276 switch (p_env->packing)
1278 case PACKING_ASK:
1279 if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
1280 (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
1281 rc = EINVAL;
1282 break;
1283 case DO_PACKED:
1284 case PACK_SEND:
1285 if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
1286 (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
1287 rc = EINVAL;
1288 break;
1289 default:
1290 if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
1291 (memcmp(p_env->api_type , ws_name, 8)!=0))
1292 rc = EINVAL;
1293 break;
1296 return rc;
1297 } /* end of find_link */
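/*
 * find_link() in short: with PACKING_ASK both names must be
 * WS_APPL_NAME_PACKED, with DO_PACKED/PACK_SEND both must be
 * WS_APPL_NAME_IP_NAME, otherwise host_name must be HOST_APPL_NAME and
 * ws_name must equal the configured api_type. Note that a mismatch
 * returns the positive value EINVAL rather than -EINVAL, so callers
 * should treat any non-zero return as a failure.
 */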
1299 /*-------------------------------------------------------------------*
1300 * claw_hw_tx *
1303 *-------------------------------------------------------------------*/
1305 static int
1306 claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1308 int rc=0;
1309 struct claw_privbk *privptr;
1310 struct ccwbk *p_this_ccw;
1311 struct ccwbk *p_first_ccw;
1312 struct ccwbk *p_last_ccw;
1313 __u32 numBuffers;
1314 signed long len_of_data;
1315 unsigned long bytesInThisBuffer;
1316 unsigned char *pDataAddress;
1317 struct endccw *pEnd;
1318 struct ccw1 tempCCW;
1319 struct chbk *p_ch;
1320 struct claw_env *p_env;
1321 int lock;
1322 struct clawph *pk_head;
1323 struct chbk *ch;
1325 CLAW_DBF_TEXT(4, trace, "hw_tx");
1326 privptr = (struct claw_privbk *)(dev->ml_priv);
1327 p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL];
1328 p_env =privptr->p_env;
1329 claw_free_wrt_buf(dev); /* Clean up free chain if possible */
1330 /* scan the write queue to free any completed write packets */
1331 p_first_ccw=NULL;
1332 p_last_ccw=NULL;
1333 if ((p_env->packing >= PACK_SEND) &&
1334 (skb->cb[1] != 'P')) {
1335 skb_push(skb,sizeof(struct clawph));
1336 pk_head=(struct clawph *)skb->data;
1337 pk_head->len=skb->len-sizeof(struct clawph);
1338 if (pk_head->len%4) {
1339 pk_head->len+= 4-(pk_head->len%4);
1340 skb_pad(skb,4-(pk_head->len%4));
1341 skb_put(skb,4-(pk_head->len%4));
1343 if (p_env->packing == DO_PACKED)
1344 pk_head->link_num = linkid;
1345 else
1346 pk_head->link_num = 0;
1347 pk_head->flag = 0x00;
1348 skb_pad(skb,4);
1349 skb->cb[1] = 'P';
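/*
 * Worked example of the packing header built above (only the fields
 * used here; the full layout is struct clawph in claw.h): for a
 * 61-byte payload, skb_push() prepends the header, len is set to 61 and
 * rounded up to 64 with padding, link_num carries the logical link id
 * in DO_PACKED mode (0 otherwise), and cb[1] = 'P' marks the skb so the
 * header is not added a second time if the skb is requeued.
 */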
1351 if (linkid == 0) {
1352 if (claw_check_busy(dev)) {
1353 if (privptr->write_free_count!=0) {
1354 claw_clear_busy(dev);
1356 else {
1357 claw_strt_out_IO(dev );
1358 claw_free_wrt_buf( dev );
1359 if (privptr->write_free_count==0) {
1360 ch = &privptr->channel[WRITE_CHANNEL];
1361 atomic_inc(&skb->users);
1362 skb_queue_tail(&ch->collect_queue, skb);
1363 goto Done;
1365 else {
1366 claw_clear_busy(dev);
1370 /* tx lock */
1371 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1372 ch = &privptr->channel[WRITE_CHANNEL];
1373 atomic_inc(&skb->users);
1374 skb_queue_tail(&ch->collect_queue, skb);
1375 claw_strt_out_IO(dev );
1376 rc=-EBUSY;
1377 goto Done2;
1380 /* See how many write buffers are required to hold this data */
1381 numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
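/* For example, a 9000-byte skb with 4 KB write buffers needs
 * DIV_ROUND_UP(9000, 4096) = 3 buffers, while a 32 KB packing buffer
 * would hold it in one (buffer sizes assume the claw.h defaults
 * described in init_ccw_bk()).
 */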
1383 /* If that number of buffers isn't available, give up for now */
1384 if (privptr->write_free_count < numBuffers ||
1385 privptr->p_write_free_chain == NULL ) {
1387 claw_setbit_busy(TB_NOBUFFER,dev);
1388 ch = &privptr->channel[WRITE_CHANNEL];
1389 atomic_inc(&skb->users);
1390 skb_queue_tail(&ch->collect_queue, skb);
1391 CLAW_DBF_TEXT(2, trace, "clawbusy");
1392 goto Done2;
1394 pDataAddress=skb->data;
1395 len_of_data=skb->len;
1397 while (len_of_data > 0) {
1398 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1399 if (p_this_ccw == NULL) { /* lost the race */
1400 ch = &privptr->channel[WRITE_CHANNEL];
1401 atomic_inc(&skb->users);
1402 skb_queue_tail(&ch->collect_queue, skb);
1403 goto Done2;
1405 privptr->p_write_free_chain=p_this_ccw->next;
1406 p_this_ccw->next=NULL;
1407 --privptr->write_free_count; /* -1 */
1408 if (len_of_data >= privptr->p_env->write_size)
1409 bytesInThisBuffer = privptr->p_env->write_size;
1410 else
1411 bytesInThisBuffer = len_of_data;
1412 memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
1413 len_of_data-=bytesInThisBuffer;
1414 pDataAddress+=(unsigned long)bytesInThisBuffer;
1415 /* setup write CCW */
1416 p_this_ccw->write.cmd_code = (linkid * 8) +1;
1417 if (len_of_data>0) {
1418 p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
1420 p_this_ccw->write.count=bytesInThisBuffer;
1421 /* now add to end of this chain */
1422 if (p_first_ccw==NULL) {
1423 p_first_ccw=p_this_ccw;
1425 if (p_last_ccw!=NULL) {
1426 p_last_ccw->next=p_this_ccw;
1427 /* set up TIC ccws */
1428 p_last_ccw->w_TIC_1.cda=
1429 (__u32)__pa(&p_this_ccw->write);
1431 p_last_ccw=p_this_ccw; /* save new last block */
1434 /* FirstCCW and LastCCW now contain a new set of write channel
1435 * programs to append to the running channel program
1438 if (p_first_ccw!=NULL) {
1439 /* setup ending ccw sequence for this segment */
1440 pEnd=privptr->p_end_ccw;
1441 if (pEnd->write1) {
1442 pEnd->write1=0x00; /* second end ccw is now active */
1443 /* set up Tic CCWs */
1444 p_last_ccw->w_TIC_1.cda=
1445 (__u32)__pa(&pEnd->write2_nop1);
1446 pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1447 pEnd->write2_nop2.flags =
1448 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1449 pEnd->write2_nop2.cda=0;
1450 pEnd->write2_nop2.count=1;
1452 else { /* end of if (pEnd->write1)*/
1453 pEnd->write1=0x01; /* first end ccw is now active */
1454 /* set up Tic CCWs */
1455 p_last_ccw->w_TIC_1.cda=
1456 (__u32)__pa(&pEnd->write1_nop1);
1457 pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1458 pEnd->write1_nop2.flags =
1459 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1460 pEnd->write1_nop2.cda=0;
1461 pEnd->write1_nop2.count=1;
1462 } /* end if if (pEnd->write1) */
1464 if (privptr->p_write_active_first==NULL ) {
1465 privptr->p_write_active_first=p_first_ccw;
1466 privptr->p_write_active_last=p_last_ccw;
1468 else {
1469 /* set up Tic CCWs */
1471 tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
1472 tempCCW.count=0;
1473 tempCCW.flags=0;
1474 tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
1476 if (pEnd->write1) {
1479 * first set of ending CCW's is chained to the new write
1480 * chain, so the second set is chained to the active chain
1481 * Therefore modify the second set to point to the new write chain.
1482 * make sure we update the CCW atomically
1483 * so channel does not fetch it when it's only half done
1485 memcpy( &pEnd->write2_nop2, &tempCCW ,
1486 sizeof(struct ccw1));
1487 privptr->p_write_active_last->w_TIC_1.cda=
1488 (__u32)__pa(&p_first_ccw->write);
1490 else {
1492 /*make sure we update the CCW atomically
1493 *so channel does not fetch it when it's only half done
1495 memcpy(&pEnd->write1_nop2, &tempCCW ,
1496 sizeof(struct ccw1));
1497 privptr->p_write_active_last->w_TIC_1.cda=
1498 (__u32)__pa(&p_first_ccw->write);
1500 } /* end if if (pEnd->write1) */
1502 privptr->p_write_active_last->next=p_first_ccw;
1503 privptr->p_write_active_last=p_last_ccw;
1506 } /* endif (p_first_ccw!=NULL) */
1507 dev_kfree_skb_any(skb);
1508 if (linkid==0) {
1509 lock=LOCK_NO;
1511 else {
1512 lock=LOCK_YES;
1514 claw_strt_out_IO(dev );
1515 /* if write free count is zero , set NOBUFFER */
1516 if (privptr->write_free_count==0) {
1517 claw_setbit_busy(TB_NOBUFFER,dev);
1519 Done2:
1520 claw_clearbit_busy(TB_TX,dev);
1521 Done:
1522 return(rc);
1523 } /* end of claw_hw_tx */
1525 /*-------------------------------------------------------------------*
1527 * init_ccw_bk *
1529 *--------------------------------------------------------------------*/
1531 static int
1532 init_ccw_bk(struct net_device *dev)
1535 __u32 ccw_blocks_required;
1536 __u32 ccw_blocks_perpage;
1537 __u32 ccw_pages_required;
1538 __u32 claw_reads_perpage=1;
1539 __u32 claw_read_pages;
1540 __u32 claw_writes_perpage=1;
1541 __u32 claw_write_pages;
1542 void *p_buff=NULL;
1543 struct ccwbk*p_free_chain;
1544 struct ccwbk*p_buf;
1545 struct ccwbk*p_last_CCWB;
1546 struct ccwbk*p_first_CCWB;
1547 struct endccw *p_endccw=NULL;
1548 addr_t real_address;
1549 struct claw_privbk *privptr = dev->ml_priv;
1550 struct clawh *pClawH=NULL;
1551 addr_t real_TIC_address;
1552 int i,j;
1553 CLAW_DBF_TEXT(4, trace, "init_ccw");
1555 /* initialize statistics field */
1556 privptr->active_link_ID=0;
1557 /* initialize ccwbk pointers */
1558 privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
1559 privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
1560 privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
1561 privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
1562 privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
1563 privptr->p_end_ccw=NULL; /* pointer to ending ccw */
1564 privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
1565 privptr->buffs_alloc = 0;
1566 memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
1567 memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
1568 /* initialize free write ccwbk counter */
1569 privptr->write_free_count=0; /* number of free bufs on write chain */
1570 p_last_CCWB = NULL;
1571 p_first_CCWB= NULL;
1573 * We need 1 CCW block for each read buffer, 1 for each
1574 * write buffer, plus 1 for ClawSignalBlock
1576 ccw_blocks_required =
1577 privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
1579 * compute number of CCW blocks that will fit in a page
1581 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
1582 ccw_pages_required=
1583 DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
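/* Example: the default 5 read + 5 write buffers plus the signal block
 * give 11 CCW blocks; the page count then follows from how many
 * CCWBK_SIZE blocks fit per page, e.g. 11 blocks at an assumed
 * 8 blocks/page -> DIV_ROUND_UP(11, 8) = 2 pages (the real value
 * depends on CCWBK_SIZE in claw.h).
 */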
1586 * read and write sizes are set by 2 constants in claw.h
1587 * 4k and 32k. Unpacked values other than 4k are not going to
1588 * provide good performance. With packing support, 32k
1589 * buffers are used.
1591 if (privptr->p_env->read_size < PAGE_SIZE) {
1592 claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
1593 claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
1594 claw_reads_perpage);
1596 else { /* > or equal */
1597 privptr->p_buff_pages_perread =
1598 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1599 claw_read_pages = privptr->p_env->read_buffers *
1600 privptr->p_buff_pages_perread;
1602 if (privptr->p_env->write_size < PAGE_SIZE) {
1603 claw_writes_perpage =
1604 PAGE_SIZE / privptr->p_env->write_size;
1605 claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
1606 claw_writes_perpage);
1609 else { /* > or equal */
1610 privptr->p_buff_pages_perwrite =
1611 DIV_ROUND_UP(privptr->p_env->write_size, PAGE_SIZE);
1612 claw_write_pages = privptr->p_env->write_buffers *
1613 privptr->p_buff_pages_perwrite;
1616 * allocate ccw_pages_required
1618 if (privptr->p_buff_ccw==NULL) {
1619 privptr->p_buff_ccw=
1620 (void *)__get_free_pages(__GFP_DMA,
1621 (int)pages_to_order_of_mag(ccw_pages_required ));
1622 if (privptr->p_buff_ccw==NULL) {
1623 return -ENOMEM;
1625 privptr->p_buff_ccw_num=ccw_pages_required;
1627 memset(privptr->p_buff_ccw, 0x00,
1628 privptr->p_buff_ccw_num * PAGE_SIZE);
1631 * obtain ending ccw block address
1634 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
1635 real_address = (__u32)__pa(privptr->p_end_ccw);
1636 /* Initialize ending CCW block */
1637 p_endccw=privptr->p_end_ccw;
1638 p_endccw->real=real_address;
1639 p_endccw->write1=0x00;
1640 p_endccw->read1=0x00;
1642 /* write1_nop1 */
1643 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1644 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1645 p_endccw->write1_nop1.count = 1;
1646 p_endccw->write1_nop1.cda = 0;
1648 /* write1_nop2 */
1649 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1650 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1651 p_endccw->write1_nop2.count = 1;
1652 p_endccw->write1_nop2.cda = 0;
1654 /* write2_nop1 */
1655 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1656 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1657 p_endccw->write2_nop1.count = 1;
1658 p_endccw->write2_nop1.cda = 0;
1660 /* write2_nop2 */
1661 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1662 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1663 p_endccw->write2_nop2.count = 1;
1664 p_endccw->write2_nop2.cda = 0;
1666 /* read1_nop1 */
1667 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1668 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1669 p_endccw->read1_nop1.count = 1;
1670 p_endccw->read1_nop1.cda = 0;
1672 /* read1_nop2 */
1673 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1674 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1675 p_endccw->read1_nop2.count = 1;
1676 p_endccw->read1_nop2.cda = 0;
1678 /* read2_nop1 */
1679 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1680 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1681 p_endccw->read2_nop1.count = 1;
1682 p_endccw->read2_nop1.cda = 0;
1684 /* read2_nop2 */
1685 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1686 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1687 p_endccw->read2_nop2.count = 1;
1688 p_endccw->read2_nop2.cda = 0;
1691 * Build a chain of CCWs
1694 p_buff=privptr->p_buff_ccw;
1696 p_free_chain=NULL;
1697 for (i=0 ; i < ccw_pages_required; i++ ) {
1698 real_address = (__u32)__pa(p_buff);
1699 p_buf=p_buff;
1700 for (j=0 ; j < ccw_blocks_perpage ; j++) {
1701 p_buf->next = p_free_chain;
1702 p_free_chain = p_buf;
1703 p_buf->real=(__u32)__pa(p_buf);
1704 ++p_buf;
1706 p_buff+=PAGE_SIZE;
1709 * Initialize ClawSignalBlock
1712 if (privptr->p_claw_signal_blk==NULL) {
1713 privptr->p_claw_signal_blk=p_free_chain;
1714 p_free_chain=p_free_chain->next;
1715 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
1716 pClawH->length=0xffff;
1717 pClawH->opcode=0xff;
1718 pClawH->flag=CLAW_BUSY;
1722 * allocate write_pages_required and add to free chain
1724 if (privptr->p_buff_write==NULL) {
1725 if (privptr->p_env->write_size < PAGE_SIZE) {
1726 privptr->p_buff_write=
1727 (void *)__get_free_pages(__GFP_DMA,
1728 (int)pages_to_order_of_mag(claw_write_pages ));
1729 if (privptr->p_buff_write==NULL) {
1730 privptr->p_buff_ccw=NULL;
1731 return -ENOMEM;
1734 * Build CLAW write free chain
1738 memset(privptr->p_buff_write, 0x00,
1739 ccw_pages_required * PAGE_SIZE);
1740 privptr->p_write_free_chain=NULL;
1742 p_buff=privptr->p_buff_write;
1744 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
1745 p_buf = p_free_chain; /* get a CCW */
1746 p_free_chain = p_buf->next;
1747 p_buf->next =privptr->p_write_free_chain;
1748 privptr->p_write_free_chain = p_buf;
1749 p_buf-> p_buffer = (struct clawbuf *)p_buff;
1750 p_buf-> write.cda = (__u32)__pa(p_buff);
1751 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1752 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1753 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1754 p_buf-> w_read_FF.count = 1;
1755 p_buf-> w_read_FF.cda =
1756 (__u32)__pa(&p_buf-> header.flag);
1757 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1758 p_buf-> w_TIC_1.flags = 0;
1759 p_buf-> w_TIC_1.count = 0;
1761 if (((unsigned long)p_buff +
1762 privptr->p_env->write_size) >=
1763 ((unsigned long)(p_buff+2*
1764 (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
1765 p_buff = p_buff+privptr->p_env->write_size;
1769 else /* Buffers are >= PAGE_SIZE. 1 buff per get_free_pages */
1771 privptr->p_write_free_chain=NULL;
1772 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
1773 p_buff=(void *)__get_free_pages(__GFP_DMA,
1774 (int)pages_to_order_of_mag(
1775 privptr->p_buff_pages_perwrite) );
1776 if (p_buff==NULL) {
1777 free_pages((unsigned long)privptr->p_buff_ccw,
1778 (int)pages_to_order_of_mag(
1779 privptr->p_buff_ccw_num));
1780 privptr->p_buff_ccw=NULL;
1781 p_buf=privptr->p_buff_write;
1782 while (p_buf!=NULL) {
1783 free_pages((unsigned long)
1784 p_buf->p_buffer,
1785 (int)pages_to_order_of_mag(
1786 privptr->p_buff_pages_perwrite));
1787 p_buf=p_buf->next;
1789 return -ENOMEM;
1790 } /* Error on get_pages */
1791 memset(p_buff, 0x00, privptr->p_env->write_size );
1792 p_buf = p_free_chain;
1793 p_free_chain = p_buf->next;
1794 p_buf->next = privptr->p_write_free_chain;
1795 privptr->p_write_free_chain = p_buf;
1796 privptr->p_buff_write = p_buf;
1797 p_buf->p_buffer=(struct clawbuf *)p_buff;
1798 p_buf-> write.cda = (__u32)__pa(p_buff);
1799 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1800 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1801 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1802 p_buf-> w_read_FF.count = 1;
1803 p_buf-> w_read_FF.cda =
1804 (__u32)__pa(&p_buf-> header.flag);
1805 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1806 p_buf-> w_TIC_1.flags = 0;
1807 p_buf-> w_TIC_1.count = 0;
1808 } /* for all write_buffers */
1810 } /* else buffers are PAGE_SIZE or bigger */
1813 privptr->p_buff_write_num=claw_write_pages;
1814 privptr->write_free_count=privptr->p_env->write_buffers;
1818 * allocate read_pages_required and chain to free chain
1820 if (privptr->p_buff_read==NULL) {
1821 if (privptr->p_env->read_size < PAGE_SIZE) {
1822 privptr->p_buff_read=
1823 (void *)__get_free_pages(__GFP_DMA,
1824 (int)pages_to_order_of_mag(claw_read_pages) );
1825 if (privptr->p_buff_read==NULL) {
1826 free_pages((unsigned long)privptr->p_buff_ccw,
1827 (int)pages_to_order_of_mag(
1828 privptr->p_buff_ccw_num));
1829 /* free the write pages; size is < PAGE_SIZE */
1830 free_pages((unsigned long)privptr->p_buff_write,
1831 (int)pages_to_order_of_mag(
1832 privptr->p_buff_write_num));
1833 privptr->p_buff_ccw=NULL;
1834 privptr->p_buff_write=NULL;
1835 return -ENOMEM;
1837 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
1838 privptr->p_buff_read_num=claw_read_pages;
1840 /* Build CLAW read free chain */
1843 p_buff=privptr->p_buff_read;
1844 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1845 p_buf = p_free_chain;
1846 p_free_chain = p_buf->next;
1848 if (p_last_CCWB==NULL) {
1849 p_buf->next=NULL;
1850 real_TIC_address=0;
1851 p_last_CCWB=p_buf;
1853 else {
1854 p_buf->next=p_first_CCWB;
1855 real_TIC_address=
1856 (__u32)__pa(&p_first_CCWB -> read );
1859 p_first_CCWB=p_buf;
1861 p_buf->p_buffer=(struct clawbuf *)p_buff;
1862 /* initialize read command */
1863 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1864 p_buf-> read.cda = (__u32)__pa(p_buff);
1865 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1866 p_buf-> read.count = privptr->p_env->read_size;
1868 /* initialize read_h command */
1869 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1870 p_buf-> read_h.cda =
1871 (__u32)__pa(&(p_buf->header));
1872 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1873 p_buf-> read_h.count = sizeof(struct clawh);
1875 /* initialize Signal command */
1876 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1877 p_buf-> signal.cda =
1878 (__u32)__pa(&(pClawH->flag));
1879 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1880 p_buf-> signal.count = 1;
1882 /* initialize r_TIC_1 command */
1883 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1884 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1885 p_buf-> r_TIC_1.flags = 0;
1886 p_buf-> r_TIC_1.count = 0;
1888 /* initialize r_read_FF command */
1889 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1890 p_buf-> r_read_FF.cda =
1891 (__u32)__pa(&(pClawH->flag));
1892 p_buf-> r_read_FF.flags =
1893 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1894 p_buf-> r_read_FF.count = 1;
1896 /* initialize r_TIC_2 */
1897 memcpy(&p_buf->r_TIC_2,
1898 &p_buf->r_TIC_1, sizeof(struct ccw1));
1900 /* initialize Header */
1901 p_buf->header.length=0xffff;
1902 p_buf->header.opcode=0xff;
1903 p_buf->header.flag=CLAW_PENDING;
1905 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
1906 ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
1908 & PAGE_MASK)) {
1909 p_buff= p_buff+privptr->p_env->read_size;
1911 else {
1912 p_buff=
1913 (void *)((unsigned long)
1914 (p_buff+2*(privptr->p_env->read_size)-1)
1915 & PAGE_MASK) ;
1917 } /* for read_buffers */
1918 } /* read_size < PAGE_SIZE */
1919 else { /* read Size >= PAGE_SIZE */
1920 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1921 p_buff = (void *)__get_free_pages(__GFP_DMA,
1922 (int)pages_to_order_of_mag(
1923 privptr->p_buff_pages_perread));
1924 if (p_buff==NULL) {
1925 free_pages((unsigned long)privptr->p_buff_ccw,
1926 (int)pages_to_order_of_mag(privptr->
1927 p_buff_ccw_num));
1928 /* free the write pages */
1929 p_buf=privptr->p_buff_write;
1930 while (p_buf!=NULL) {
1931 free_pages(
1932 (unsigned long)p_buf->p_buffer,
1933 (int)pages_to_order_of_mag(
1934 privptr->p_buff_pages_perwrite));
1935 p_buf=p_buf->next;
1937 /* free any read pages already alloc */
1938 p_buf=privptr->p_buff_read;
1939 while (p_buf!=NULL) {
1940 free_pages(
1941 (unsigned long)p_buf->p_buffer,
1942 (int)pages_to_order_of_mag(
1943 privptr->p_buff_pages_perread));
1944 p_buf=p_buf->next;
1946 privptr->p_buff_ccw=NULL;
1947 privptr->p_buff_write=NULL;
1948 return -ENOMEM;
1950 memset(p_buff, 0x00, privptr->p_env->read_size);
1951 p_buf = p_free_chain;
1952 privptr->p_buff_read = p_buf;
1953 p_free_chain = p_buf->next;
1955 if (p_last_CCWB==NULL) {
1956 p_buf->next=NULL;
1957 real_TIC_address=0;
1958 p_last_CCWB=p_buf;
1960 else {
1961 p_buf->next=p_first_CCWB;
1962 real_TIC_address=
1963 (addr_t)__pa(
1964 &p_first_CCWB -> read );
1967 p_first_CCWB=p_buf;
1968 /* save buff address */
1969 p_buf->p_buffer=(struct clawbuf *)p_buff;
1970 /* initialize read command */
1971 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1972 p_buf-> read.cda = (__u32)__pa(p_buff);
1973 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1974 p_buf-> read.count = privptr->p_env->read_size;
1976 /* initialize read_h command */
1977 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1978 p_buf-> read_h.cda =
1979 (__u32)__pa(&(p_buf->header));
1980 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1981 p_buf-> read_h.count = sizeof(struct clawh);
1983 /* initialize Signal command */
1984 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1985 p_buf-> signal.cda =
1986 (__u32)__pa(&(pClawH->flag));
1987 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1988 p_buf-> signal.count = 1;
1990 /* initialize r_TIC_1 command */
1991 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1992 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1993 p_buf-> r_TIC_1.flags = 0;
1994 p_buf-> r_TIC_1.count = 0;
1996 /* initialize r_read_FF command */
1997 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1998 p_buf-> r_read_FF.cda =
1999 (__u32)__pa(&(pClawH->flag));
2000 p_buf-> r_read_FF.flags =
2001 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
2002 p_buf-> r_read_FF.count = 1;
2004 /* initialize r_TIC_2 */
2005 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
2006 sizeof(struct ccw1));
2008 /* initialize Header */
2009 p_buf->header.length=0xffff;
2010 p_buf->header.opcode=0xff;
2011 p_buf->header.flag=CLAW_PENDING;
2013 } /* For read_buffers */
2014 } /* read_size >= PAGE_SIZE */
2015 } /* pBuffread = NULL */
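/*
 * In the chains built above, r_TIC_1 (and its copy r_TIC_2) is a TIC
 * (transfer in channel) command whose data address points at the read
 * command of the next CCW block in the chain, letting the channel
 * program branch from one read buffer to the next; the first block
 * built, which ends up as the tail, gets a TIC address of 0.  The
 * blocks are then handed to add_claw_reads() to be queued for the
 * read channel.
 */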
2016 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
2017 privptr->buffs_alloc = 1;
2019 return 0;
2020 } /* end of init_ccw_bk */
2022 /*-------------------------------------------------------------------*
2024 * probe_error *
2026 *--------------------------------------------------------------------*/
2028 static void
2029 probe_error( struct ccwgroup_device *cgdev)
2031 struct claw_privbk *privptr;
2033 CLAW_DBF_TEXT(4, trace, "proberr");
2034 privptr = dev_get_drvdata(&cgdev->dev);
2035 if (privptr != NULL) {
2036 dev_set_drvdata(&cgdev->dev, NULL);
2037 kfree(privptr->p_env);
2038 kfree(privptr->p_mtc_envelope);
2039 kfree(privptr);
2041 } /* probe_error */
2043 /*-------------------------------------------------------------------*
2044 * claw_process_control *
2047 *--------------------------------------------------------------------*/
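/*
 * Handles an inbound CLAW control record.  The handshake visible in
 * the switch below is: SYSTEM_VALIDATE_REQUEST/RESPONSE (name and
 * frame-size checks), CONNECTION_REQUEST/RESPONSE/CONFIRM (link
 * establishment, with optional packing negotiation when the peer uses
 * WS_APPL_NAME_PACKED), then DISCONNECT and CLAW_ERROR for teardown
 * and peer failure.
 */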
2049 static int
2050 claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2053 struct clawbuf *p_buf;
2054 struct clawctl ctlbk;
2055 struct clawctl *p_ctlbk;
2056 char temp_host_name[8];
2057 char temp_ws_name[8];
2058 struct claw_privbk *privptr;
2059 struct claw_env *p_env;
2060 struct sysval *p_sysval;
2061 struct conncmd *p_connect=NULL;
2062 int rc;
2063 struct chbk *p_ch = NULL;
2064 struct device *tdev;
2065 CLAW_DBF_TEXT(2, setup, "clw_cntl");
2066 udelay(1000); /* Wait a ms for the control packets to
2067 * catch up to each other */
2068 privptr = dev->ml_priv;
2069 p_env=privptr->p_env;
2070 tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
2071 memcpy( &temp_host_name, p_env->host_name, 8);
2072 memcpy( &temp_ws_name, p_env->adapter_name , 8);
2073 dev_info(tdev, "%s: CLAW device %.8s: "
2074 "Received Control Packet\n",
2075 dev->name, temp_ws_name);
2076 if (privptr->release_pend==1) {
2077 return 0;
2079 p_buf=p_ccw->p_buffer;
2080 p_ctlbk=&ctlbk;
2081 if (p_env->packing == DO_PACKED) { /* packing in progress?*/
2082 memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
2083 } else {
2084 memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
2086 switch (p_ctlbk->command)
2088 case SYSTEM_VALIDATE_REQUEST:
2089 if (p_ctlbk->version != CLAW_VERSION_ID) {
2090 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2091 CLAW_RC_WRONG_VERSION);
2092 dev_warn(tdev, "The communication peer of %s"
2093 " uses an incorrect API version %d\n",
2094 dev->name, p_ctlbk->version);
2096 p_sysval = (struct sysval *)&(p_ctlbk->data);
2097 dev_info(tdev, "%s: Recv Sys Validate Request: "
2098 "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
2099 "Host name=%.8s\n",
2100 dev->name, p_ctlbk->version,
2101 p_ctlbk->linkid,
2102 p_ctlbk->correlator,
2103 p_sysval->WS_name,
2104 p_sysval->host_name);
2105 if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
2106 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2107 CLAW_RC_NAME_MISMATCH);
2108 CLAW_DBF_TEXT(2, setup, "HSTBAD");
2109 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
2110 CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
2111 dev_warn(tdev,
2112 "Host name %s for %s does not match the"
2113 " remote adapter name %s\n",
2114 p_sysval->host_name,
2115 dev->name,
2116 temp_host_name);
2118 if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
2119 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2120 CLAW_RC_NAME_MISMATCH);
2121 CLAW_DBF_TEXT(2, setup, "WSNBAD");
2122 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
2123 CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
2124 dev_warn(tdev, "Adapter name %s for %s does not match"
2125 " the remote host name %s\n",
2126 p_sysval->WS_name,
2127 dev->name,
2128 temp_ws_name);
2130 if ((p_sysval->write_frame_size < p_env->write_size) &&
2131 (p_env->packing == 0)) {
2132 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2133 CLAW_RC_HOST_RCV_TOO_SMALL);
2134 dev_warn(tdev,
2135 "The local write buffer is smaller than the"
2136 " remote read buffer\n");
2137 CLAW_DBF_TEXT(2, setup, "wrtszbad");
2139 if ((p_sysval->read_frame_size < p_env->read_size) &&
2140 (p_env->packing == 0)) {
2141 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2142 CLAW_RC_HOST_RCV_TOO_SMALL);
2143 dev_warn(tdev,
2144 "The local read buffer is smaller than the"
2145 " remote write buffer\n");
2146 CLAW_DBF_TEXT(2, setup, "rdsizbad");
2148 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
2149 dev_info(tdev,
2150 "CLAW device %.8s: System validate"
2151 " completed.\n", temp_ws_name);
2152 dev_info(tdev,
2153 "%s: sys Validate Rsize:%d Wsize:%d\n",
2154 dev->name, p_sysval->read_frame_size,
2155 p_sysval->write_frame_size);
2156 privptr->system_validate_comp = 1;
2157 if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
2158 p_env->packing = PACKING_ASK;
2159 claw_strt_conn_req(dev);
2160 break;
2161 case SYSTEM_VALIDATE_RESPONSE:
2162 p_sysval = (struct sysval *)&(p_ctlbk->data);
2163 dev_info(tdev,
2164 "Settings for %s validated (version=%d, "
2165 "remote device=%d, rc=%d, adapter name=%.8s, "
2166 "host name=%.8s)\n",
2167 dev->name,
2168 p_ctlbk->version,
2169 p_ctlbk->correlator,
2170 p_ctlbk->rc,
2171 p_sysval->WS_name,
2172 p_sysval->host_name);
2173 switch (p_ctlbk->rc) {
2174 case 0:
2175 dev_info(tdev, "%s: CLAW device "
2176 "%.8s: System validate completed.\n",
2177 dev->name, temp_ws_name);
2178 if (privptr->system_validate_comp == 0)
2179 claw_strt_conn_req(dev);
2180 privptr->system_validate_comp = 1;
2181 break;
2182 case CLAW_RC_NAME_MISMATCH:
2183 dev_warn(tdev, "Validating %s failed because of"
2184 " a host or adapter name mismatch\n",
2185 dev->name);
2186 break;
2187 case CLAW_RC_WRONG_VERSION:
2188 dev_warn(tdev, "Validating %s failed because of a"
2189 " version conflict\n",
2190 dev->name);
2191 break;
2192 case CLAW_RC_HOST_RCV_TOO_SMALL:
2193 dev_warn(tdev, "Validating %s failed because of a"
2194 " frame size conflict\n",
2195 dev->name);
2196 break;
2197 default:
2198 dev_warn(tdev, "The communication peer of %s rejected"
2199 " the connection\n",
2200 dev->name);
2201 break;
2203 break;
2205 case CONNECTION_REQUEST:
2206 p_connect = (struct conncmd *)&(p_ctlbk->data);
2207 dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2208 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2209 dev->name,
2210 p_ctlbk->version,
2211 p_ctlbk->linkid,
2212 p_ctlbk->correlator,
2213 p_connect->host_name,
2214 p_connect->WS_name);
2215 if (privptr->active_link_ID != 0) {
2216 claw_snd_disc(dev, p_ctlbk);
2217 dev_info(tdev, "%s rejected a connection request"
2218 " because it is already active\n",
2219 dev->name);
2221 if (p_ctlbk->linkid != 1) {
2222 claw_snd_disc(dev, p_ctlbk);
2223 dev_info(tdev, "%s rejected a request to open multiple"
2224 " connections\n",
2225 dev->name);
2227 rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
2228 if (rc != 0) {
2229 claw_snd_disc(dev, p_ctlbk);
2230 dev_info(tdev, "%s rejected a connection request"
2231 " because of a type mismatch\n",
2232 dev->name);
2234 claw_send_control(dev,
2235 CONNECTION_CONFIRM, p_ctlbk->linkid,
2236 p_ctlbk->correlator,
2237 0, p_connect->host_name,
2238 p_connect->WS_name);
2239 if (p_env->packing == PACKING_ASK) {
2240 p_env->packing = PACK_SEND;
2241 claw_snd_conn_req(dev, 0);
2243 dev_info(tdev, "%s: CLAW device %.8s: Connection "
2244 "completed link_id=%d.\n",
2245 dev->name, temp_ws_name,
2246 p_ctlbk->linkid);
2247 privptr->active_link_ID = p_ctlbk->linkid;
2248 p_ch = &privptr->channel[WRITE_CHANNEL];
2249 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2250 break;
2251 case CONNECTION_RESPONSE:
2252 p_connect = (struct conncmd *)&(p_ctlbk->data);
2253 dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2254 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
2255 dev->name,
2256 p_ctlbk->version,
2257 p_ctlbk->linkid,
2258 p_ctlbk->correlator,
2259 p_ctlbk->rc,
2260 p_connect->host_name,
2261 p_connect->WS_name);
2263 if (p_ctlbk->rc != 0) {
2264 dev_warn(tdev, "The communication peer of %s rejected"
2265 " a connection request\n",
2266 dev->name);
2267 return 1;
2269 rc = find_link(dev,
2270 p_connect->host_name, p_connect->WS_name);
2271 if (rc != 0) {
2272 claw_snd_disc(dev, p_ctlbk);
2273 dev_warn(tdev, "The communication peer of %s"
2274 " rejected a connection "
2275 "request because of a type mismatch\n",
2276 dev->name);
2278 /* remains negative until CONNECTION_CONFIRM arrives */
2279 privptr->active_link_ID = -(p_ctlbk->linkid);
2280 break;
2281 case CONNECTION_CONFIRM:
2282 p_connect = (struct conncmd *)&(p_ctlbk->data);
2283 dev_info(tdev,
2284 "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
2285 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
2286 dev->name,
2287 p_ctlbk->version,
2288 p_ctlbk->linkid,
2289 p_ctlbk->correlator,
2290 p_connect->host_name,
2291 p_connect->WS_name);
2292 if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
2293 privptr->active_link_ID = p_ctlbk->linkid;
2294 if (p_env->packing > PACKING_ASK) {
2295 dev_info(tdev,
2296 "%s: Confirmed Now packing\n", dev->name);
2297 p_env->packing = DO_PACKED;
2299 p_ch = &privptr->channel[WRITE_CHANNEL];
2300 wake_up(&p_ch->wait);
2301 } else {
2302 dev_warn(tdev, "Activating %s failed because of"
2303 " an incorrect link ID=%d\n",
2304 dev->name, p_ctlbk->linkid);
2305 claw_snd_disc(dev, p_ctlbk);
2307 break;
2308 case DISCONNECT:
2309 dev_info(tdev, "%s: Disconnect: "
2310 "Vers=%d,link_id=%d,Corr=%d\n",
2311 dev->name, p_ctlbk->version,
2312 p_ctlbk->linkid, p_ctlbk->correlator);
2313 if ((p_ctlbk->linkid == 2) &&
2314 (p_env->packing == PACK_SEND)) {
2315 privptr->active_link_ID = 1;
2316 p_env->packing = DO_PACKED;
2317 } else
2318 privptr->active_link_ID = 0;
2319 break;
2320 case CLAW_ERROR:
2321 dev_warn(tdev, "The communication peer of %s failed\n",
2322 dev->name);
2323 break;
2324 default:
2325 dev_warn(tdev, "The communication peer of %s sent"
2326 " an unknown command code\n",
2327 dev->name);
2328 break;
2331 return 0;
2332 } /* end of claw_process_control */
2335 /*-------------------------------------------------------------------*
2336 * claw_send_control *
2338 *--------------------------------------------------------------------*/
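/*
 * Builds a clawctl record of the given type in privptr->ctl_bk, fills
 * in the sysval or conncmd payload, copies the record into a freshly
 * allocated sk_buff and hands it to claw_hw_tx(), passing 1 instead of
 * 0 as the last argument once packing has reached PACK_SEND.
 */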
2340 static int
2341 claw_send_control(struct net_device *dev, __u8 type, __u8 link,
2342 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
2344 struct claw_privbk *privptr;
2345 struct clawctl *p_ctl;
2346 struct sysval *p_sysval;
2347 struct conncmd *p_connect;
2348 struct sk_buff *skb;
2350 CLAW_DBF_TEXT(2, setup, "sndcntl");
2351 privptr = dev->ml_priv;
2352 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2354 p_ctl->command=type;
2355 p_ctl->version=CLAW_VERSION_ID;
2356 p_ctl->linkid=link;
2357 p_ctl->correlator=correlator;
2358 p_ctl->rc=rc;
2360 p_sysval=(struct sysval *)&p_ctl->data;
2361 p_connect=(struct conncmd *)&p_ctl->data;
2363 switch (p_ctl->command) {
2364 case SYSTEM_VALIDATE_REQUEST:
2365 case SYSTEM_VALIDATE_RESPONSE:
2366 memcpy(&p_sysval->host_name, local_name, 8);
2367 memcpy(&p_sysval->WS_name, remote_name, 8);
2368 if (privptr->p_env->packing > 0) {
2369 p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
2370 p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
2371 } else {
2372 /* how big is the biggest group of packets */
2373 p_sysval->read_frame_size =
2374 privptr->p_env->read_size;
2375 p_sysval->write_frame_size =
2376 privptr->p_env->write_size;
2378 memset(&p_sysval->reserved, 0x00, 4);
2379 break;
2380 case CONNECTION_REQUEST:
2381 case CONNECTION_RESPONSE:
2382 case CONNECTION_CONFIRM:
2383 case DISCONNECT:
2384 memcpy(&p_sysval->host_name, local_name, 8);
2385 memcpy(&p_sysval->WS_name, remote_name, 8);
2386 if (privptr->p_env->packing > 0) {
2387 /* How big is the biggest packet */
2388 p_connect->reserved1[0]=CLAW_FRAME_SIZE;
2389 p_connect->reserved1[1]=CLAW_FRAME_SIZE;
2390 } else {
2391 memset(&p_connect->reserved1, 0x00, 4);
2392 memset(&p_connect->reserved2, 0x00, 4);
2394 break;
2395 default:
2396 break;
2399 /* write Control Record to the device */
2402 skb = dev_alloc_skb(sizeof(struct clawctl));
2403 if (!skb) {
2404 return -ENOMEM;
2406 memcpy(skb_put(skb, sizeof(struct clawctl)),
2407 p_ctl, sizeof(struct clawctl));
2408 if (privptr->p_env->packing >= PACK_SEND)
2409 claw_hw_tx(skb, dev, 1);
2410 else
2411 claw_hw_tx(skb, dev, 0);
2412 return 0;
2413 } /* end of claw_send_control */
2415 /*-------------------------------------------------------------------*
2416 * claw_snd_conn_req *
2418 *--------------------------------------------------------------------*/
2419 static int
2420 claw_snd_conn_req(struct net_device *dev, __u8 link)
2422 int rc;
2423 struct claw_privbk *privptr = dev->ml_priv;
2424 struct clawctl *p_ctl;
2426 CLAW_DBF_TEXT(2, setup, "snd_conn");
2427 rc = 1;
2428 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2429 p_ctl->linkid = link;
2430 if ( privptr->system_validate_comp==0x00 ) {
2431 return rc;
2433 if (privptr->p_env->packing == PACKING_ASK )
2434 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2435 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
2436 if (privptr->p_env->packing == PACK_SEND) {
2437 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2438 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
2440 if (privptr->p_env->packing == 0)
2441 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2442 HOST_APPL_NAME, privptr->p_env->api_type);
2443 return rc;
2445 } /* end of claw_snd_conn_req */
2448 /*-------------------------------------------------------------------*
2449 * claw_snd_disc *
2451 *--------------------------------------------------------------------*/
2453 static int
2454 claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
2456 int rc;
2457 struct conncmd * p_connect;
2459 CLAW_DBF_TEXT(2, setup, "snd_dsc");
2460 p_connect=(struct conncmd *)&p_ctl->data;
2462 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
2463 p_ctl->correlator, 0,
2464 p_connect->host_name, p_connect->WS_name);
2465 return rc;
2466 } /* end of claw_snd_disc */
2469 /*-------------------------------------------------------------------*
2470 * claw_snd_sys_validate_rsp *
2472 *--------------------------------------------------------------------*/
2474 static int
2475 claw_snd_sys_validate_rsp(struct net_device *dev,
2476 struct clawctl *p_ctl, __u32 return_code)
2478 struct claw_env * p_env;
2479 struct claw_privbk *privptr;
2480 int rc;
2482 CLAW_DBF_TEXT(2, setup, "chkresp");
2483 privptr = dev->ml_priv;
2484 p_env=privptr->p_env;
2485 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
2486 p_ctl->linkid,
2487 p_ctl->correlator,
2488 return_code,
2489 p_env->host_name,
2490 p_env->adapter_name );
2491 return rc;
2492 } /* end of claw_snd_sys_validate_rsp */
2494 /*-------------------------------------------------------------------*
2495 * claw_strt_conn_req *
2497 *--------------------------------------------------------------------*/
2499 static int
2500 claw_strt_conn_req(struct net_device *dev )
2502 int rc;
2504 CLAW_DBF_TEXT(2, setup, "conn_req");
2505 rc=claw_snd_conn_req(dev, 1);
2506 return rc;
2507 } /* end of claw_strt_conn_req */
2511 /*-------------------------------------------------------------------*
2512 * claw_stats *
2513 *-------------------------------------------------------------------*/
2515 static struct
2516 net_device_stats *claw_stats(struct net_device *dev)
2518 struct claw_privbk *privptr;
2520 CLAW_DBF_TEXT(4, trace, "stats");
2521 privptr = dev->ml_priv;
2522 return &privptr->stats;
2523 } /* end of claw_stats */
2526 /*-------------------------------------------------------------------*
2527 * unpack_read *
2529 *--------------------------------------------------------------------*/
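/*
 * Walks the completed CCW blocks on the active read queue: control
 * records (link 0) go to claw_process_control(), packed frames are
 * split on their clawph headers, and More-To-Come sequences are
 * reassembled in p_mtc_envelope before being passed to the stack as
 * IP packets via netif_rx().  Finished blocks are re-queued through
 * add_claw_reads() and the read channel is restarted.
 */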
2530 static void
2531 unpack_read(struct net_device *dev )
2533 struct sk_buff *skb;
2534 struct claw_privbk *privptr;
2535 struct claw_env *p_env;
2536 struct ccwbk *p_this_ccw;
2537 struct ccwbk *p_first_ccw;
2538 struct ccwbk *p_last_ccw;
2539 struct clawph *p_packh;
2540 void *p_packd;
2541 struct clawctl *p_ctlrec=NULL;
2542 struct device *p_dev;
2544 __u32 len_of_data;
2545 __u32 pack_off;
2546 __u8 link_num;
2547 __u8 mtc_this_frm=0;
2548 __u32 bytes_to_mov;
2549 int i=0;
2550 int p=0;
2552 CLAW_DBF_TEXT(4, trace, "unpkread");
2553 p_first_ccw=NULL;
2554 p_last_ccw=NULL;
2555 p_packh=NULL;
2556 p_packd=NULL;
2557 privptr = dev->ml_priv;
2559 p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
2560 p_env = privptr->p_env;
2561 p_this_ccw=privptr->p_read_active_first;
2562 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
2563 pack_off = 0;
2564 p = 0;
2565 p_this_ccw->header.flag=CLAW_PENDING;
2566 privptr->p_read_active_first=p_this_ccw->next;
2567 p_this_ccw->next=NULL;
2568 p_packh = (struct clawph *)p_this_ccw->p_buffer;
2569 if ((p_env->packing == PACK_SEND) &&
2570 (p_packh->len == 32) &&
2571 (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
2572 p_packh++; /* peek past pack header */
2573 p_ctlrec = (struct clawctl *)p_packh;
2574 p_packh--; /* un peek */
2575 if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
2576 (p_ctlrec->command == CONNECTION_CONFIRM))
2577 p_env->packing = DO_PACKED;
2579 if (p_env->packing == DO_PACKED)
2580 link_num=p_packh->link_num;
2581 else
2582 link_num=p_this_ccw->header.opcode / 8;
2583 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
2584 mtc_this_frm=1;
2585 if (p_this_ccw->header.length!=
2586 privptr->p_env->read_size ) {
2587 dev_warn(p_dev,
2588 "The communication peer of %s"
2589 " sent a faulty"
2590 " frame of length %02x\n",
2591 dev->name, p_this_ccw->header.length);
2595 if (privptr->mtc_skipping) {
2597 /* We're in the mode of skipping past a
2598 * multi-frame message
2599 * that we can't process for some reason or other.
2600 * The first frame without the More-To-Come flag is
2601 * the last frame of the skipped message. */
2603 /* in case of More-To-Come not set in this frame */
2604 if (mtc_this_frm==0) {
2605 privptr->mtc_skipping=0; /* Ok, the end */
2606 privptr->mtc_logical_link=-1;
2608 goto NextFrame;
2611 if (link_num==0) {
2612 claw_process_control(dev, p_this_ccw);
2613 CLAW_DBF_TEXT(4, trace, "UnpkCntl");
2614 goto NextFrame;
2616 unpack_next:
2617 if (p_env->packing == DO_PACKED) {
2618 if (pack_off > p_env->read_size)
2619 goto NextFrame;
2620 p_packd = p_this_ccw->p_buffer+pack_off;
2621 p_packh = (struct clawph *) p_packd;
2622 if ((p_packh->len == 0) || /* done with this frame? */
2623 (p_packh->flag != 0))
2624 goto NextFrame;
2625 bytes_to_mov = p_packh->len;
2626 pack_off += bytes_to_mov+sizeof(struct clawph);
2627 p++;
2628 } else {
2629 bytes_to_mov=p_this_ccw->header.length;
2631 if (privptr->mtc_logical_link<0) {
2634 /* if More-To-Come is set in this frame then we don't know the
2635 * length of the entire message, and hence have to allocate a
2636 * large buffer */
2638 /* We are starting a new envelope */
2639 privptr->mtc_offset=0;
2640 privptr->mtc_logical_link=link_num;
2643 if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
2644 /* error */
2645 privptr->stats.rx_frame_errors++;
2646 goto NextFrame;
2648 if (p_env->packing == DO_PACKED) {
2649 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2650 p_packd+sizeof(struct clawph), bytes_to_mov);
2652 } else {
2653 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2654 p_this_ccw->p_buffer, bytes_to_mov);
2656 if (mtc_this_frm==0) {
2657 len_of_data=privptr->mtc_offset+bytes_to_mov;
2658 skb=dev_alloc_skb(len_of_data);
2659 if (skb) {
2660 memcpy(skb_put(skb,len_of_data),
2661 privptr->p_mtc_envelope,
2662 len_of_data);
2663 skb->dev=dev;
2664 skb_reset_mac_header(skb);
2665 skb->protocol=htons(ETH_P_IP);
2666 skb->ip_summed=CHECKSUM_UNNECESSARY;
2667 privptr->stats.rx_packets++;
2668 privptr->stats.rx_bytes+=len_of_data;
2669 netif_rx(skb);
2671 else {
2672 dev_info(p_dev, "Allocating a buffer for"
2673 " incoming data failed\n");
2674 privptr->stats.rx_dropped++;
2676 privptr->mtc_offset=0;
2677 privptr->mtc_logical_link=-1;
2679 else {
2680 privptr->mtc_offset+=bytes_to_mov;
2682 if (p_env->packing == DO_PACKED)
2683 goto unpack_next;
2684 NextFrame:
2686 /* Remove ThisCCWblock from active read queue, and add it
2687 * to queue of free blocks to be reused. */
2689 i++;
2690 p_this_ccw->header.length=0xffff;
2691 p_this_ccw->header.opcode=0xff;
2693 /* add this one to the free queue for later reuse */
2695 if (p_first_ccw==NULL) {
2696 p_first_ccw = p_this_ccw;
2698 else {
2699 p_last_ccw->next = p_this_ccw;
2701 p_last_ccw = p_this_ccw;
2703 /* chain to next block on active read queue */
2705 p_this_ccw = privptr->p_read_active_first;
2706 CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
2707 } /* end of while */
2709 /* check validity */
2711 CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
2712 add_claw_reads(dev, p_first_ccw, p_last_ccw);
2713 claw_strt_read(dev, LOCK_YES);
2714 return;
2715 } /* end of unpack_read */
2717 /*-------------------------------------------------------------------*
2718 * claw_strt_read *
2720 *--------------------------------------------------------------------*/
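/*
 * Marks the signal block CLAW_IDLE (or CLAW_BUSY while reads or writes
 * are still outstanding) and, if no read I/O is currently active,
 * starts the channel program at the first active read CCW with
 * ccw_device_start(), taking the ccw device lock when called with
 * LOCK_YES.
 */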
2721 static void
2722 claw_strt_read (struct net_device *dev, int lock )
2724 int rc = 0;
2725 __u32 parm;
2726 unsigned long saveflags = 0;
2727 struct claw_privbk *privptr = dev->ml_priv;
2728 struct ccwbk*p_ccwbk;
2729 struct chbk *p_ch;
2730 struct clawh *p_clawh;
2731 p_ch = &privptr->channel[READ_CHANNEL];
2733 CLAW_DBF_TEXT(4, trace, "StRdNter");
2734 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
2735 p_clawh->flag=CLAW_IDLE; /* 0x00 */
2737 if ((privptr->p_write_active_first!=NULL &&
2738 privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
2739 (privptr->p_read_active_first!=NULL &&
2740 privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
2741 p_clawh->flag=CLAW_BUSY; /* 0xff */
2743 if (lock==LOCK_YES) {
2744 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
2746 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2747 CLAW_DBF_TEXT(4, trace, "HotRead");
2748 p_ccwbk=privptr->p_read_active_first;
2749 parm = (unsigned long) p_ch;
2750 rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
2751 0xff, 0);
2752 if (rc != 0) {
2753 ccw_check_return_code(p_ch->cdev, rc);
2756 else {
2757 CLAW_DBF_TEXT(2, trace, "ReadAct");
2760 if (lock==LOCK_YES) {
2761 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
2763 CLAW_DBF_TEXT(4, trace, "StRdExit");
2764 return;
2765 } /* end of claw_strt_read */
2767 /*-------------------------------------------------------------------*
2768 * claw_strt_out_IO *
2770 *--------------------------------------------------------------------*/
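/*
 * Starts the write channel program at the first active write CCW,
 * provided the channel is not in CLAW_STOP state and no write I/O is
 * already in flight; dev->trans_start is refreshed afterwards.
 */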
2772 static void
2773 claw_strt_out_IO( struct net_device *dev )
2775 int rc = 0;
2776 unsigned long parm;
2777 struct claw_privbk *privptr;
2778 struct chbk *p_ch;
2779 struct ccwbk *p_first_ccw;
2781 if (!dev) {
2782 return;
2784 privptr = (struct claw_privbk *)dev->ml_priv;
2785 p_ch = &privptr->channel[WRITE_CHANNEL];
2787 CLAW_DBF_TEXT(4, trace, "strt_io");
2788 p_first_ccw=privptr->p_write_active_first;
2790 if (p_ch->claw_state == CLAW_STOP)
2791 return;
2792 if (p_first_ccw == NULL) {
2793 return;
2795 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2796 parm = (unsigned long) p_ch;
2797 CLAW_DBF_TEXT(2, trace, "StWrtIO");
2798 rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
2799 0xff, 0);
2800 if (rc != 0) {
2801 ccw_check_return_code(p_ch->cdev, rc);
2804 dev->trans_start = jiffies;
2805 return;
2806 } /* end of claw_strt_out_IO */
2808 /*-------------------------------------------------------------------*
2809 * Free write buffers *
2811 *--------------------------------------------------------------------*/
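/*
 * Scans the active write queue from the front and returns every CCW
 * block whose I/O has completed to the write free chain, updating the
 * transmit statistics and clearing the no-buffer busy bit once free
 * buffers are available again.
 */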
2813 static void
2814 claw_free_wrt_buf( struct net_device *dev )
2817 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2818 struct ccwbk*p_first_ccw;
2819 struct ccwbk*p_last_ccw;
2820 struct ccwbk*p_this_ccw;
2821 struct ccwbk*p_next_ccw;
2823 CLAW_DBF_TEXT(4, trace, "freewrtb");
2824 /* scan the write queue to free any completed write packets */
2825 p_first_ccw=NULL;
2826 p_last_ccw=NULL;
2827 p_this_ccw=privptr->p_write_active_first;
2828 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2830 p_next_ccw = p_this_ccw->next;
2831 if (((p_next_ccw!=NULL) &&
2832 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
2833 ((p_this_ccw == privptr->p_write_active_last) &&
2834 (p_this_ccw->header.flag!=CLAW_PENDING))) {
2835 /* The next CCW is OK or this is */
2836 /* the last CCW...free it @A1A */
2837 privptr->p_write_active_first=p_this_ccw->next;
2838 p_this_ccw->header.flag=CLAW_PENDING;
2839 p_this_ccw->next=privptr->p_write_free_chain;
2840 privptr->p_write_free_chain=p_this_ccw;
2841 ++privptr->write_free_count;
2842 privptr->stats.tx_bytes+= p_this_ccw->write.count;
2843 p_this_ccw=privptr->p_write_active_first;
2844 privptr->stats.tx_packets++;
2846 else {
2847 break;
2850 if (privptr->write_free_count!=0) {
2851 claw_clearbit_busy(TB_NOBUFFER,dev);
2853 /* whole chain removed? */
2854 if (privptr->p_write_active_first==NULL) {
2855 privptr->p_write_active_last=NULL;
2857 CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
2858 return;
2861 /*-------------------------------------------------------------------*
2862 * claw free netdevice *
2864 *--------------------------------------------------------------------*/
2865 static void
2866 claw_free_netdevice(struct net_device * dev, int free_dev)
2868 struct claw_privbk *privptr;
2870 CLAW_DBF_TEXT(2, setup, "free_dev");
2871 if (!dev)
2872 return;
2873 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2874 privptr = dev->ml_priv;
2875 if (dev->flags & IFF_RUNNING)
2876 claw_release(dev);
2877 if (privptr) {
2878 privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
2880 dev->ml_priv = NULL;
2881 #ifdef MODULE
2882 if (free_dev) {
2883 free_netdev(dev);
2885 #endif
2886 CLAW_DBF_TEXT(2, setup, "free_ok");
2890 /* Claw init netdevice
2891 * Initialize everything of the net device except the name and the
2892 * channel structs. */
2894 static const struct net_device_ops claw_netdev_ops = {
2895 .ndo_open = claw_open,
2896 .ndo_stop = claw_release,
2897 .ndo_get_stats = claw_stats,
2898 .ndo_start_xmit = claw_tx,
2899 .ndo_change_mtu = claw_change_mtu,
2902 static void
2903 claw_init_netdevice(struct net_device * dev)
2905 CLAW_DBF_TEXT(2, setup, "init_dev");
2906 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2907 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
2908 dev->hard_header_len = 0;
2909 dev->addr_len = 0;
2910 dev->type = ARPHRD_SLIP;
2911 dev->tx_queue_len = 1300;
2912 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2913 dev->netdev_ops = &claw_netdev_ops;
2914 CLAW_DBF_TEXT(2, setup, "initok");
2915 return;
2919 /* Init a new channel in the privptr->channel[i].
2921 * @param cdev The ccw_device to be added.
2923 * @return 0 on success, !0 on error. */
2925 static int
2926 add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
2928 struct chbk *p_ch;
2929 struct ccw_dev_id dev_id;
2931 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
2932 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
2933 p_ch = &privptr->channel[i];
2934 p_ch->cdev = cdev;
2935 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
2936 ccw_device_get_id(cdev, &dev_id);
2937 p_ch->devno = dev_id.devno;
2938 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
2939 return -ENOMEM;
2941 return 0;
2947 /* Setup an interface.
2949 * @param cgdev Device to be setup.
2951 * @returns 0 on success, !0 on failure. */
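/*
 * Setup order: both channels are added and set online, a claw%d net
 * device is allocated and registered, and the CCW buffer pools are
 * built with init_ccw_bk(); any failure takes both subchannels
 * offline again through the out: path.
 */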
2953 static int
2954 claw_new_device(struct ccwgroup_device *cgdev)
2956 struct claw_privbk *privptr;
2957 struct claw_env *p_env;
2958 struct net_device *dev;
2959 int ret;
2960 struct ccw_dev_id dev_id;
2962 dev_info(&cgdev->dev, "add for %s\n",
2963 dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
2964 CLAW_DBF_TEXT(2, setup, "new_dev");
2965 privptr = dev_get_drvdata(&cgdev->dev);
2966 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2967 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2968 if (!privptr)
2969 return -ENODEV;
2970 p_env = privptr->p_env;
2971 ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
2972 p_env->devno[READ_CHANNEL] = dev_id.devno;
2973 ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
2974 p_env->devno[WRITE_CHANNEL] = dev_id.devno;
2975 ret = add_channel(cgdev->cdev[0],0,privptr);
2976 if (ret == 0)
2977 ret = add_channel(cgdev->cdev[1],1,privptr);
2978 if (ret != 0) {
2979 dev_warn(&cgdev->dev, "Creating a CLAW group device"
2980 " failed with error code %d\n", ret);
2981 goto out;
2983 ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
2984 if (ret != 0) {
2985 dev_warn(&cgdev->dev,
2986 "Setting the read subchannel online"
2987 " failed with error code %d\n", ret);
2988 goto out;
2990 ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
2991 if (ret != 0) {
2992 dev_warn(&cgdev->dev,
2993 "Setting the write subchannel online "
2994 "failed with error code %d\n", ret);
2995 goto out;
2997 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
2998 if (!dev) {
2999 dev_warn(&cgdev->dev,
3000 "Activating the CLAW device failed\n");
3001 goto out;
3003 dev->ml_priv = privptr;
3004 dev_set_drvdata(&cgdev->dev, privptr);
3005 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
3006 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
3007 /* sysfs magic */
3008 SET_NETDEV_DEV(dev, &cgdev->dev);
3009 if (register_netdev(dev) != 0) {
3010 claw_free_netdevice(dev, 1);
3011 CLAW_DBF_TEXT(2, trace, "regfail");
3012 goto out;
3014 dev->flags &=~IFF_RUNNING;
3015 if (privptr->buffs_alloc == 0) {
3016 ret=init_ccw_bk(dev);
3017 if (ret !=0) {
3018 unregister_netdev(dev);
3019 claw_free_netdevice(dev,1);
3020 CLAW_DBF_TEXT(2, trace, "ccwmem");
3021 goto out;
3024 privptr->channel[READ_CHANNEL].ndev = dev;
3025 privptr->channel[WRITE_CHANNEL].ndev = dev;
3026 privptr->p_env->ndev = dev;
3028 dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
3029 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
3030 dev->name, p_env->read_size,
3031 p_env->write_size, p_env->read_buffers,
3032 p_env->write_buffers, p_env->devno[READ_CHANNEL],
3033 p_env->devno[WRITE_CHANNEL]);
3034 dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
3035 ":%.8s api_type: %.8s\n",
3036 dev->name, p_env->host_name,
3037 p_env->adapter_name , p_env->api_type);
3038 return 0;
3039 out:
3040 ccw_device_set_offline(cgdev->cdev[1]);
3041 ccw_device_set_offline(cgdev->cdev[0]);
3042 return -ENODEV;
3045 static void
3046 claw_purge_skb_queue(struct sk_buff_head *q)
3048 struct sk_buff *skb;
3050 CLAW_DBF_TEXT(4, trace, "purgque");
3051 while ((skb = skb_dequeue(q))) {
3052 atomic_dec(&skb->users);
3053 dev_kfree_skb_any(skb);
3058 /* Shutdown an interface.
3060 * @param cgdev Device to be shut down.
3062 * @returns 0 on success, !0 on failure. */
3064 static int
3065 claw_shutdown_device(struct ccwgroup_device *cgdev)
3067 struct claw_privbk *priv;
3068 struct net_device *ndev;
3069 int ret;
3071 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3072 priv = dev_get_drvdata(&cgdev->dev);
3073 if (!priv)
3074 return -ENODEV;
3075 ndev = priv->channel[READ_CHANNEL].ndev;
3076 if (ndev) {
3077 /* Close the device */
3078 dev_info(&cgdev->dev, "%s: shutting down\n",
3079 ndev->name);
3080 if (ndev->flags & IFF_RUNNING)
3081 ret = claw_release(ndev);
3082 ndev->flags &=~IFF_RUNNING;
3083 unregister_netdev(ndev);
3084 ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
3085 claw_free_netdevice(ndev, 1);
3086 priv->channel[READ_CHANNEL].ndev = NULL;
3087 priv->channel[WRITE_CHANNEL].ndev = NULL;
3088 priv->p_env->ndev = NULL;
3090 ccw_device_set_offline(cgdev->cdev[1]);
3091 ccw_device_set_offline(cgdev->cdev[0]);
3092 return 0;
3095 static void
3096 claw_remove_device(struct ccwgroup_device *cgdev)
3098 struct claw_privbk *priv;
3100 BUG_ON(!cgdev);
3101 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3102 priv = dev_get_drvdata(&cgdev->dev);
3103 BUG_ON(!priv);
3104 dev_info(&cgdev->dev, " will be removed.\n");
3105 if (cgdev->state == CCWGROUP_ONLINE)
3106 claw_shutdown_device(cgdev);
3107 claw_remove_files(&cgdev->dev);
3108 kfree(priv->p_mtc_envelope);
3109 priv->p_mtc_envelope=NULL;
3110 kfree(priv->p_env);
3111 priv->p_env=NULL;
3112 kfree(priv->channel[0].irb);
3113 priv->channel[0].irb=NULL;
3114 kfree(priv->channel[1].irb);
3115 priv->channel[1].irb=NULL;
3116 kfree(priv);
3117 dev_set_drvdata(&cgdev->dev, NULL);
3118 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
3119 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
3120 put_device(&cgdev->dev);
3122 return;
3127 /* sysfs attributes */
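/*
 * The attributes below (host_name, adapter_name, api_type,
 * read_buffer, write_buffer) are grouped in claw_attr_group and
 * created on the ccwgroup device by claw_add_files() via
 * sysfs_create_group(); claw_remove_files() drops them again on
 * device removal.
 */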
3129 static ssize_t
3130 claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3132 struct claw_privbk *priv;
3133 struct claw_env * p_env;
3135 priv = dev_get_drvdata(dev);
3136 if (!priv)
3137 return -ENODEV;
3138 p_env = priv->p_env;
3139 return sprintf(buf, "%s\n",p_env->host_name);
3142 static ssize_t
3143 claw_hname_write(struct device *dev, struct device_attribute *attr,
3144 const char *buf, size_t count)
3146 struct claw_privbk *priv;
3147 struct claw_env * p_env;
3149 priv = dev_get_drvdata(dev);
3150 if (!priv)
3151 return -ENODEV;
3152 p_env = priv->p_env;
3153 if (count > MAX_NAME_LEN+1)
3154 return -EINVAL;
3155 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
3156 strncpy(p_env->host_name,buf, count);
3157 p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
3158 p_env->host_name[MAX_NAME_LEN] = 0x00;
3159 CLAW_DBF_TEXT(2, setup, "HstnSet");
3160 CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
3162 return count;
3165 static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
3167 static ssize_t
3168 claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3170 struct claw_privbk *priv;
3171 struct claw_env * p_env;
3173 priv = dev_get_drvdata(dev);
3174 if (!priv)
3175 return -ENODEV;
3176 p_env = priv->p_env;
3177 return sprintf(buf, "%s\n", p_env->adapter_name);
3180 static ssize_t
3181 claw_adname_write(struct device *dev, struct device_attribute *attr,
3182 const char *buf, size_t count)
3184 struct claw_privbk *priv;
3185 struct claw_env * p_env;
3187 priv = dev_get_drvdata(dev);
3188 if (!priv)
3189 return -ENODEV;
3190 p_env = priv->p_env;
3191 if (count > MAX_NAME_LEN+1)
3192 return -EINVAL;
3193 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
3194 strncpy(p_env->adapter_name,buf, count);
3195 p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
3196 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
3197 CLAW_DBF_TEXT(2, setup, "AdnSet");
3198 CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
3200 return count;
3203 static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
3205 static ssize_t
3206 claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3208 struct claw_privbk *priv;
3209 struct claw_env * p_env;
3211 priv = dev_get_drvdata(dev);
3212 if (!priv)
3213 return -ENODEV;
3214 p_env = priv->p_env;
3215 return sprintf(buf, "%s\n",
3216 p_env->api_type);
3219 static ssize_t
3220 claw_apname_write(struct device *dev, struct device_attribute *attr,
3221 const char *buf, size_t count)
3223 struct claw_privbk *priv;
3224 struct claw_env * p_env;
3226 priv = dev_get_drvdata(dev);
3227 if (!priv)
3228 return -ENODEV;
3229 p_env = priv->p_env;
3230 if (count > MAX_NAME_LEN+1)
3231 return -EINVAL;
3232 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
3233 strncpy(p_env->api_type,buf, count);
3234 p_env->api_type[count-1] = 0x20; /* strip the trailing 0x0a */
3235 p_env->api_type[MAX_NAME_LEN] = 0x00;
3236 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
3237 p_env->read_size=DEF_PACK_BUFSIZE;
3238 p_env->write_size=DEF_PACK_BUFSIZE;
3239 p_env->packing=PACKING_ASK;
3240 CLAW_DBF_TEXT(2, setup, "PACKING");
3242 else {
3243 p_env->packing=0;
3244 p_env->read_size=CLAW_FRAME_SIZE;
3245 p_env->write_size=CLAW_FRAME_SIZE;
3246 CLAW_DBF_TEXT(2, setup, "ApiSet");
3248 CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
3249 return count;
3252 static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
3254 static ssize_t
3255 claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3257 struct claw_privbk *priv;
3258 struct claw_env * p_env;
3260 priv = dev_get_drvdata(dev);
3261 if (!priv)
3262 return -ENODEV;
3263 p_env = priv->p_env;
3264 return sprintf(buf, "%d\n", p_env->write_buffers);
3267 static ssize_t
3268 claw_wbuff_write(struct device *dev, struct device_attribute *attr,
3269 const char *buf, size_t count)
3271 struct claw_privbk *priv;
3272 struct claw_env * p_env;
3273 int nnn,max;
3275 priv = dev_get_drvdata(dev);
3276 if (!priv)
3277 return -ENODEV;
3278 p_env = priv->p_env;
3279 sscanf(buf, "%i", &nnn);
3280 if (p_env->packing) {
3281 max = 64;
3283 else {
3284 max = 512;
3286 if ((nnn > max ) || (nnn < 2))
3287 return -EINVAL;
3288 p_env->write_buffers = nnn;
3289 CLAW_DBF_TEXT(2, setup, "Wbufset");
3290 CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
3291 return count;
3294 static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
3296 static ssize_t
3297 claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3299 struct claw_privbk *priv;
3300 struct claw_env * p_env;
3302 priv = dev_get_drvdata(dev);
3303 if (!priv)
3304 return -ENODEV;
3305 p_env = priv->p_env;
3306 return sprintf(buf, "%d\n", p_env->read_buffers);
3309 static ssize_t
3310 claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3311 const char *buf, size_t count)
3313 struct claw_privbk *priv;
3314 struct claw_env *p_env;
3315 int nnn,max;
3317 priv = dev_get_drvdata(dev);
3318 if (!priv)
3319 return -ENODEV;
3320 p_env = priv->p_env;
3321 sscanf(buf, "%i", &nnn);
3322 if (p_env->packing) {
3323 max = 64;
3325 else {
3326 max = 512;
3328 if ((nnn > max ) || (nnn < 2))
3329 return -EINVAL;
3330 p_env->read_buffers = nnn;
3331 CLAW_DBF_TEXT(2, setup, "Rbufset");
3332 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3333 return count;
3336 static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
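/*
 * Both buffer-count attributes accept values from 2 up to 64 when the
 * interface runs in packing mode and up to 512 otherwise; anything
 * outside that range is rejected with -EINVAL.
 *
 * Illustrative use from user space (the bus ID is a placeholder for
 * the CLAW ccwgroup device):
 *   echo 25 > /sys/bus/ccwgroup/devices/<bus-id>/read_buffer
 */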
3338 static struct attribute *claw_attr[] = {
3339 &dev_attr_read_buffer.attr,
3340 &dev_attr_write_buffer.attr,
3341 &dev_attr_adapter_name.attr,
3342 &dev_attr_api_type.attr,
3343 &dev_attr_host_name.attr,
3344 NULL,
3347 static struct attribute_group claw_attr_group = {
3348 .attrs = claw_attr,
3351 static int
3352 claw_add_files(struct device *dev)
3354 CLAW_DBF_TEXT(2, setup, "add_file");
3355 return sysfs_create_group(&dev->kobj, &claw_attr_group);
3358 static void
3359 claw_remove_files(struct device *dev)
3361 CLAW_DBF_TEXT(2, setup, "rem_file");
3362 sysfs_remove_group(&dev->kobj, &claw_attr_group);
3365 /*--------------------------------------------------------------------*
3366 * claw_init and cleanup *
3367 *---------------------------------------------------------------------*/
3369 static void __exit
3370 claw_cleanup(void)
3372 driver_remove_file(&claw_group_driver.driver,
3373 &driver_attr_group);
3374 ccwgroup_driver_unregister(&claw_group_driver);
3375 ccw_driver_unregister(&claw_ccw_driver);
3376 root_device_unregister(claw_root_dev);
3377 claw_unregister_debug_facility();
3378 pr_info("Driver unloaded\n");
3383 /* Initialize module.
3384 * This is called just after the module is loaded.
3386 * @return 0 on success, !0 on error. */
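/*
 * Registration order: s390 debug facility, the "claw" root device, the
 * ccw driver, then the ccwgroup driver; the error labels below unwind
 * these steps in reverse order.
 */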
3388 static int __init
3389 claw_init(void)
3391 int ret = 0;
3393 pr_info("Loading %s\n", version);
3394 ret = claw_register_debug_facility();
3395 if (ret) {
3396 pr_err("Registering with the S/390 debug feature"
3397 " failed with error code %d\n", ret);
3398 goto out_err;
3400 CLAW_DBF_TEXT(2, setup, "init_mod");
3401 claw_root_dev = root_device_register("claw");
3402 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
3403 if (ret)
3404 goto register_err;
3405 ret = ccw_driver_register(&claw_ccw_driver);
3406 if (ret)
3407 goto ccw_err;
3408 claw_group_driver.driver.groups = claw_group_attr_groups;
3409 ret = ccwgroup_driver_register(&claw_group_driver);
3410 if (ret)
3411 goto ccwgroup_err;
3412 return 0;
3414 ccwgroup_err:
3415 ccw_driver_unregister(&claw_ccw_driver);
3416 ccw_err:
3417 root_device_unregister(claw_root_dev);
3418 register_err:
3419 CLAW_DBF_TEXT(2, setup, "init_bad");
3420 claw_unregister_debug_facility();
3421 out_err:
3422 pr_err("Initializing the claw device driver failed\n");
3423 return ret;
3426 module_init(claw_init);
3427 module_exit(claw_cleanup);
3429 MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
3430 MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
3431 "Copyright 2000,2008 IBM Corporation\n");
3432 MODULE_LICENSE("GPL");