nao-ulib.git / kernel/2.6.29.6-aldebaran-rt/drivers/s390/net/claw.c
1 /*
2 * drivers/s390/net/claw.c
3 * ESCON CLAW network driver
5 * Linux for zSeries version
6 * Copyright (C) 2002,2005 IBM Corporation
7 * Author(s) Original code written by:
8 * Kazuo Iimura (iimura@jp.ibm.com)
9 * Rewritten by
10 * Andy Richter (richtera@us.ibm.com)
11 * Marc Price (mwprice@us.ibm.com)
13 * sysfs parms:
14 * group x.x.rrrr,x.x.wwww
15 * read_buffer nnnnnnn
16 * write_buffer nnnnnn
17 * host_name aaaaaaaa
18 * adapter_name aaaaaaaa
19 * api_type aaaaaaaa
21 * eg.
22 * group 0.0.0200,0.0.0201
23 * read_buffer 25
24 * write_buffer 20
25 * host_name LINUX390
26 * adapter_name RS6K
27 * api_type TCPIP
29 * where
31 * The device id is decided by the order in which entries
32 * are added to the group: the first is claw0, the second claw1,
33 * and so on up to CLAW_MAX_DEV
35 * rrrr - the first of 2 consecutive device addresses used for the
36 * CLAW protocol.
37 * The specified address is always used as the input (Read)
38 * channel and the next address is used as the output channel.
40 * wwww - the second of 2 consecutive device addresses used for
41 * the CLAW protocol.
42 * The specified address is always used as the output
43 * channel and the previous address is used as the input channel.
45 * read_buffer - specifies number of input buffers to allocate.
46 * write_buffer - specifies number of output buffers to allocate.
47 * host_name - host name
48 * adapter_name - adapter name
49 * api_type - API type, TCPIP or API, which will be sent and
50 * expected as ws_name
52 * Note the following requirements:
53 * 1) host_name must match the configured adapter_name on the remote side
54 * 2) adapter_name must match the configured host_name on the remote side
56 * Change History
57 * 1.00 Initial release shipped
58 * 1.10 Changes for Buffer allocation
59 * 1.15 Changed for 2.6 Kernel; no longer compiles on 2.4 or lower
60 * 1.25 Added Packing support
61 * 1.5
62 */
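/*
 * Illustrative setup sketch: using the attribute names documented above,
 * a CLAW interface is typically configured from user space roughly as
 * follows.  The sysfs paths and the 0.0.0200/0.0.0201 bus IDs are
 * assumptions taken from the example values above; verify them against
 * the target system before use.
 *
 *   echo 0.0.0200,0.0.0201 > /sys/bus/ccwgroup/drivers/claw/group
 *   echo 25        > /sys/bus/ccwgroup/devices/0.0.0200/read_buffer
 *   echo 20        > /sys/bus/ccwgroup/devices/0.0.0200/write_buffer
 *   echo LINUX390  > /sys/bus/ccwgroup/devices/0.0.0200/host_name
 *   echo RS6K      > /sys/bus/ccwgroup/devices/0.0.0200/adapter_name
 *   echo TCPIP     > /sys/bus/ccwgroup/devices/0.0.0200/api_type
 *   echo 1         > /sys/bus/ccwgroup/devices/0.0.0200/online
 */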
63 #include <asm/ccwdev.h>
64 #include <asm/ccwgroup.h>
65 #include <asm/debug.h>
66 #include <asm/idals.h>
67 #include <asm/io.h>
68 #include <linux/bitops.h>
69 #include <linux/ctype.h>
70 #include <linux/delay.h>
71 #include <linux/errno.h>
72 #include <linux/if_arp.h>
73 #include <linux/init.h>
74 #include <linux/interrupt.h>
75 #include <linux/ip.h>
76 #include <linux/kernel.h>
77 #include <linux/module.h>
78 #include <linux/netdevice.h>
79 #include <linux/etherdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/sched.h>
82 #include <linux/signal.h>
83 #include <linux/skbuff.h>
84 #include <linux/slab.h>
85 #include <linux/string.h>
86 #include <linux/tcp.h>
87 #include <linux/timer.h>
88 #include <linux/types.h>
90 #include "cu3088.h"
91 #include "claw.h"
94 /* CLAW uses the s390dbf file system; see claw_trace and claw_setup */
98 static char debug_buffer[255];
99 /**
100 * Debug Facility Stuff
102 static debug_info_t *claw_dbf_setup;
103 static debug_info_t *claw_dbf_trace;
106 * CLAW Debug Facility functions
108 static void
109 claw_unregister_debug_facility(void)
111 if (claw_dbf_setup)
112 debug_unregister(claw_dbf_setup);
113 if (claw_dbf_trace)
114 debug_unregister(claw_dbf_trace);
117 static int
118 claw_register_debug_facility(void)
120 claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
121 claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
122 if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
123 claw_unregister_debug_facility();
124 return -ENOMEM;
126 debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
127 debug_set_level(claw_dbf_setup, 2);
128 debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
129 debug_set_level(claw_dbf_trace, 2);
130 return 0;
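/*
 * Note: with s390dbf on debugfs the two debug areas registered above are
 * typically readable as /sys/kernel/debug/s390dbf/claw_setup/hex_ascii and
 * /sys/kernel/debug/s390dbf/claw_trace/hex_ascii; the exact location
 * depends on where debugfs is mounted on the running system.
 */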
133 static inline void
134 claw_set_busy(struct net_device *dev)
136 ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
137 eieio();
140 static inline void
141 claw_clear_busy(struct net_device *dev)
143 clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
144 netif_wake_queue(dev);
145 eieio();
148 static inline int
149 claw_check_busy(struct net_device *dev)
151 eieio();
152 return ((struct claw_privbk *) dev->ml_priv)->tbusy;
155 static inline void
156 claw_setbit_busy(int nr,struct net_device *dev)
158 netif_stop_queue(dev);
159 set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
162 static inline void
163 claw_clearbit_busy(int nr,struct net_device *dev)
165 clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
166 netif_wake_queue(dev);
169 static inline int
170 claw_test_and_setbit_busy(int nr,struct net_device *dev)
172 netif_stop_queue(dev);
173 return test_and_set_bit(nr,
174 (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
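/*
 * Note on the busy helpers above: tbusy in struct claw_privbk is used as
 * a bit mask.  claw_set_busy()/claw_check_busy() treat it as a plain flag,
 * while the *bit_busy() variants manipulate individual bits (TB_TX,
 * TB_RETRY, TB_STOP and TB_NOBUFFER are the bits used later in this file)
 * and pair each update with netif_stop_queue()/netif_wake_queue().
 */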
178 /* Functions for the DEV methods */
180 static int claw_probe(struct ccwgroup_device *cgdev);
181 static void claw_remove_device(struct ccwgroup_device *cgdev);
182 static void claw_purge_skb_queue(struct sk_buff_head *q);
183 static int claw_new_device(struct ccwgroup_device *cgdev);
184 static int claw_shutdown_device(struct ccwgroup_device *cgdev);
185 static int claw_tx(struct sk_buff *skb, struct net_device *dev);
186 static int claw_change_mtu( struct net_device *dev, int new_mtu);
187 static int claw_open(struct net_device *dev);
188 static void claw_irq_handler(struct ccw_device *cdev,
189 unsigned long intparm, struct irb *irb);
190 static void claw_irq_tasklet ( unsigned long data );
191 static int claw_release(struct net_device *dev);
192 static void claw_write_retry ( struct chbk * p_ch );
193 static void claw_write_next ( struct chbk * p_ch );
194 static void claw_timer ( struct chbk * p_ch );
196 /* Functions */
197 static int add_claw_reads(struct net_device *dev,
198 struct ccwbk* p_first, struct ccwbk* p_last);
199 static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
200 static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
201 static int find_link(struct net_device *dev, char *host_name, char *ws_name );
202 static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
203 static int init_ccw_bk(struct net_device *dev);
204 static void probe_error( struct ccwgroup_device *cgdev);
205 static struct net_device_stats *claw_stats(struct net_device *dev);
206 static int pages_to_order_of_mag(int num_of_pages);
207 static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
208 /* sysfs Functions */
209 static ssize_t claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf);
210 static ssize_t claw_hname_write(struct device *dev, struct device_attribute *attr,
211 const char *buf, size_t count);
212 static ssize_t claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf);
213 static ssize_t claw_adname_write(struct device *dev, struct device_attribute *attr,
214 const char *buf, size_t count);
215 static ssize_t claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf);
216 static ssize_t claw_apname_write(struct device *dev, struct device_attribute *attr,
217 const char *buf, size_t count);
218 static ssize_t claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf);
219 static ssize_t claw_wbuff_write(struct device *dev, struct device_attribute *attr,
220 const char *buf, size_t count);
221 static ssize_t claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf);
222 static ssize_t claw_rbuff_write(struct device *dev, struct device_attribute *attr,
223 const char *buf, size_t count);
224 static int claw_add_files(struct device *dev);
225 static void claw_remove_files(struct device *dev);
227 /* Functions for System Validate */
228 static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
229 static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
230 __u8 correlator, __u8 rc , char *local_name, char *remote_name);
231 static int claw_snd_conn_req(struct net_device *dev, __u8 link);
232 static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
233 static int claw_snd_sys_validate_rsp(struct net_device *dev,
234 struct clawctl * p_ctl, __u32 return_code);
235 static int claw_strt_conn_req(struct net_device *dev );
236 static void claw_strt_read(struct net_device *dev, int lock);
237 static void claw_strt_out_IO(struct net_device *dev);
238 static void claw_free_wrt_buf(struct net_device *dev);
240 /* Functions for unpack reads */
241 static void unpack_read(struct net_device *dev);
243 /* ccwgroup table */
245 static struct ccwgroup_driver claw_group_driver = {
246 .owner = THIS_MODULE,
247 .name = "claw",
248 .max_slaves = 2,
249 .driver_id = 0xC3D3C1E6,
250 .probe = claw_probe,
251 .remove = claw_remove_device,
252 .set_online = claw_new_device,
253 .set_offline = claw_shutdown_device,
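/* Note: the driver_id above, 0xC3D3C1E6, is the string "CLAW" in EBCDIC. */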
257 * Key functions
260 /*----------------------------------------------------------------*
261 * claw_probe *
262 * this function is called for each CLAW device. *
263 *----------------------------------------------------------------*/
264 static int
265 claw_probe(struct ccwgroup_device *cgdev)
267 int rc;
268 struct claw_privbk *privptr=NULL;
270 CLAW_DBF_TEXT(2, setup, "probe");
271 if (!get_device(&cgdev->dev))
272 return -ENODEV;
273 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
274 cgdev->dev.driver_data = privptr;
275 if (privptr == NULL) {
276 probe_error(cgdev);
277 put_device(&cgdev->dev);
278 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
279 return -ENOMEM;
281 privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
282 privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
283 if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
284 probe_error(cgdev);
285 put_device(&cgdev->dev);
286 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
287 return -ENOMEM;
289 memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
290 memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
291 memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
292 privptr->p_env->packing = 0;
293 privptr->p_env->write_buffers = 5;
294 privptr->p_env->read_buffers = 5;
295 privptr->p_env->read_size = CLAW_FRAME_SIZE;
296 privptr->p_env->write_size = CLAW_FRAME_SIZE;
297 rc = claw_add_files(&cgdev->dev);
298 if (rc) {
299 probe_error(cgdev);
300 put_device(&cgdev->dev);
301 printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n",
302 dev_name(&cgdev->cdev[0]->dev), __func__, __LINE__);
303 CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
304 return rc;
306 privptr->p_env->p_priv = privptr;
307 cgdev->cdev[0]->handler = claw_irq_handler;
308 cgdev->cdev[1]->handler = claw_irq_handler;
309 CLAW_DBF_TEXT(2, setup, "prbext 0");
311 return 0;
312 } /* end of claw_probe */
314 /*-------------------------------------------------------------------*
315 * claw_tx *
316 *-------------------------------------------------------------------*/
318 static int
319 claw_tx(struct sk_buff *skb, struct net_device *dev)
321 int rc;
322 struct claw_privbk *privptr = dev->ml_priv;
323 unsigned long saveflags;
324 struct chbk *p_ch;
326 CLAW_DBF_TEXT(4, trace, "claw_tx");
327 p_ch=&privptr->channel[WRITE];
328 if (skb == NULL) {
329 privptr->stats.tx_dropped++;
330 privptr->stats.tx_errors++;
331 CLAW_DBF_TEXT_(2, trace, "clawtx%d", -EIO);
332 return -EIO;
334 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
335 rc=claw_hw_tx( skb, dev, 1 );
336 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
337 CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
338 return rc;
339 } /* end of claw_tx */
341 /*------------------------------------------------------------------*
342 * pack the collect queue into an skb and return it *
343 * If not packing just return the top skb from the queue *
344 *------------------------------------------------------------------*/
346 static struct sk_buff *
347 claw_pack_skb(struct claw_privbk *privptr)
349 struct sk_buff *new_skb,*held_skb;
350 struct chbk *p_ch = &privptr->channel[WRITE];
351 struct claw_env *p_env = privptr->p_env;
352 int pkt_cnt,pk_ind,so_far;
354 new_skb = NULL; /* assume no dice */
355 pkt_cnt = 0;
356 CLAW_DBF_TEXT(4, trace, "PackSKBe");
357 if (!skb_queue_empty(&p_ch->collect_queue)) {
358 /* some data */
359 held_skb = skb_dequeue(&p_ch->collect_queue);
360 if (held_skb)
361 dev_kfree_skb_any(held_skb);
362 else
363 return NULL;
364 if (p_env->packing != DO_PACKED)
365 return held_skb;
366 /* get a new SKB we will pack at least one */
367 new_skb = dev_alloc_skb(p_env->write_size);
368 if (new_skb == NULL) {
369 atomic_inc(&held_skb->users);
370 skb_queue_head(&p_ch->collect_queue,held_skb);
371 return NULL;
373 /* we have a packet to pack and a place to put it */
374 pk_ind = 1;
375 so_far = 0;
376 new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
377 while ((pk_ind) && (held_skb != NULL)) {
378 if (held_skb->len+so_far <= p_env->write_size-8) {
379 memcpy(skb_put(new_skb,held_skb->len),
380 held_skb->data,held_skb->len);
381 privptr->stats.tx_packets++;
382 so_far += held_skb->len;
383 pkt_cnt++;
384 dev_kfree_skb_any(held_skb);
385 held_skb = skb_dequeue(&p_ch->collect_queue);
386 if (held_skb)
387 atomic_dec(&held_skb->users);
388 } else {
389 pk_ind = 0;
390 atomic_inc(&held_skb->users);
391 skb_queue_head(&p_ch->collect_queue,held_skb);
395 CLAW_DBF_TEXT(4, trace, "PackSKBx");
396 return new_skb;
399 /*-------------------------------------------------------------------*
400 * claw_change_mtu *
402 *-------------------------------------------------------------------*/
404 static int
405 claw_change_mtu(struct net_device *dev, int new_mtu)
407 struct claw_privbk *privptr = dev->ml_priv;
408 int buff_size;
409 CLAW_DBF_TEXT(4, trace, "setmtu");
410 buff_size = privptr->p_env->write_size;
411 if ((new_mtu < 60) || (new_mtu > buff_size)) {
412 return -EINVAL;
414 dev->mtu = new_mtu;
415 return 0;
416 } /* end of claw_change_mtu */
419 /*-------------------------------------------------------------------*
420 * claw_open *
422 *-------------------------------------------------------------------*/
423 static int
424 claw_open(struct net_device *dev)
427 int rc;
428 int i;
429 unsigned long saveflags=0;
430 unsigned long parm;
431 struct claw_privbk *privptr;
432 DECLARE_WAITQUEUE(wait, current);
433 struct timer_list timer;
434 struct ccwbk *p_buf;
436 CLAW_DBF_TEXT(4, trace, "open");
437 privptr = (struct claw_privbk *)dev->ml_priv;
438 /* allocate and initialize CCW blocks */
439 if (privptr->buffs_alloc == 0) {
440 rc=init_ccw_bk(dev);
441 if (rc) {
442 CLAW_DBF_TEXT(2, trace, "openmem");
443 return -ENOMEM;
446 privptr->system_validate_comp=0;
447 privptr->release_pend=0;
448 if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
449 privptr->p_env->read_size=DEF_PACK_BUFSIZE;
450 privptr->p_env->write_size=DEF_PACK_BUFSIZE;
451 privptr->p_env->packing=PACKING_ASK;
452 } else {
453 privptr->p_env->packing=0;
454 privptr->p_env->read_size=CLAW_FRAME_SIZE;
455 privptr->p_env->write_size=CLAW_FRAME_SIZE;
457 claw_set_busy(dev);
458 tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
459 (unsigned long) &privptr->channel[READ]);
460 for ( i = 0; i < 2; i++) {
461 CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
462 init_waitqueue_head(&privptr->channel[i].wait);
463 /* skb_queue_head_init(&p_ch->io_queue); */
464 if (i == WRITE)
465 skb_queue_head_init(
466 &privptr->channel[WRITE].collect_queue);
467 privptr->channel[i].flag_a = 0;
468 privptr->channel[i].IO_active = 0;
469 privptr->channel[i].flag &= ~CLAW_TIMER;
470 init_timer(&timer);
471 timer.function = (void *)claw_timer;
472 timer.data = (unsigned long)(&privptr->channel[i]);
473 timer.expires = jiffies + 15*HZ;
474 add_timer(&timer);
475 spin_lock_irqsave(get_ccwdev_lock(
476 privptr->channel[i].cdev), saveflags);
477 parm = (unsigned long) &privptr->channel[i];
478 privptr->channel[i].claw_state = CLAW_START_HALT_IO;
479 rc = 0;
480 add_wait_queue(&privptr->channel[i].wait, &wait);
481 rc = ccw_device_halt(
482 (struct ccw_device *)privptr->channel[i].cdev,parm);
483 set_current_state(TASK_INTERRUPTIBLE);
484 spin_unlock_irqrestore(
485 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
486 schedule();
487 set_current_state(TASK_RUNNING);
488 remove_wait_queue(&privptr->channel[i].wait, &wait);
489 if(rc != 0)
490 ccw_check_return_code(privptr->channel[i].cdev, rc);
491 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
492 del_timer(&timer);
494 if ((((privptr->channel[READ].last_dstat |
495 privptr->channel[WRITE].last_dstat) &
496 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
497 (((privptr->channel[READ].flag |
498 privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
499 printk(KERN_INFO "%s: remote side is not ready\n", dev->name);
500 CLAW_DBF_TEXT(2, trace, "notrdy");
502 for ( i = 0; i < 2; i++) {
503 spin_lock_irqsave(
504 get_ccwdev_lock(privptr->channel[i].cdev),
505 saveflags);
506 parm = (unsigned long) &privptr->channel[i];
507 privptr->channel[i].claw_state = CLAW_STOP;
508 rc = ccw_device_halt(
509 (struct ccw_device *)&privptr->channel[i].cdev,
510 parm);
511 spin_unlock_irqrestore(
512 get_ccwdev_lock(privptr->channel[i].cdev),
513 saveflags);
514 if (rc != 0) {
515 ccw_check_return_code(
516 privptr->channel[i].cdev, rc);
519 free_pages((unsigned long)privptr->p_buff_ccw,
520 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
521 if (privptr->p_env->read_size < PAGE_SIZE) {
522 free_pages((unsigned long)privptr->p_buff_read,
523 (int)pages_to_order_of_mag(
524 privptr->p_buff_read_num));
526 else {
527 p_buf=privptr->p_read_active_first;
528 while (p_buf!=NULL) {
529 free_pages((unsigned long)p_buf->p_buffer,
530 (int)pages_to_order_of_mag(
531 privptr->p_buff_pages_perread ));
532 p_buf=p_buf->next;
535 if (privptr->p_env->write_size < PAGE_SIZE ) {
536 free_pages((unsigned long)privptr->p_buff_write,
537 (int)pages_to_order_of_mag(
538 privptr->p_buff_write_num));
540 else {
541 p_buf=privptr->p_write_active_first;
542 while (p_buf!=NULL) {
543 free_pages((unsigned long)p_buf->p_buffer,
544 (int)pages_to_order_of_mag(
545 privptr->p_buff_pages_perwrite ));
546 p_buf=p_buf->next;
549 privptr->buffs_alloc = 0;
550 privptr->channel[READ].flag= 0x00;
551 privptr->channel[WRITE].flag = 0x00;
552 privptr->p_buff_ccw=NULL;
553 privptr->p_buff_read=NULL;
554 privptr->p_buff_write=NULL;
555 claw_clear_busy(dev);
556 CLAW_DBF_TEXT(2, trace, "open EIO");
557 return -EIO;
560 /* Send SystemValidate command */
562 claw_clear_busy(dev);
563 CLAW_DBF_TEXT(4, trace, "openok");
564 return 0;
565 } /* end of claw_open */
567 /*-------------------------------------------------------------------*
569 * claw_irq_handler *
571 *--------------------------------------------------------------------*/
572 static void
573 claw_irq_handler(struct ccw_device *cdev,
574 unsigned long intparm, struct irb *irb)
576 struct chbk *p_ch = NULL;
577 struct claw_privbk *privptr = NULL;
578 struct net_device *dev = NULL;
579 struct claw_env *p_env;
580 struct chbk *p_ch_r=NULL;
582 CLAW_DBF_TEXT(4, trace, "clawirq");
583 /* Bypass all 'unsolicited interrupts' */
584 if (!cdev->dev.driver_data) {
585 printk(KERN_WARNING "claw: unsolicited interrupt for device: "
586 "%s received c-%02x d-%02x\n",
587 dev_name(&cdev->dev), irb->scsw.cmd.cstat,
588 irb->scsw.cmd.dstat);
589 CLAW_DBF_TEXT(2, trace, "badirq");
590 return;
592 privptr = (struct claw_privbk *)cdev->dev.driver_data;
594 /* Try to extract channel from driver data. */
595 if (privptr->channel[READ].cdev == cdev)
596 p_ch = &privptr->channel[READ];
597 else if (privptr->channel[WRITE].cdev == cdev)
598 p_ch = &privptr->channel[WRITE];
599 else {
600 printk(KERN_WARNING "claw: Can't determine channel for "
601 "interrupt, device %s\n", dev_name(&cdev->dev));
602 CLAW_DBF_TEXT(2, trace, "badchan");
603 return;
605 CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);
607 dev = (struct net_device *) (p_ch->ndev);
608 p_env=privptr->p_env;
610 /* Copy interruption response block. */
611 memcpy(p_ch->irb, irb, sizeof(struct irb));
613 /* Check for good subchannel return code, otherwise info message */
614 if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
615 printk(KERN_INFO "%s: subchannel check for device: %04x -"
616 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
617 dev->name, p_ch->devno,
618 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
619 irb->scsw.cmd.cpa);
620 CLAW_DBF_TEXT(2, trace, "chanchk");
621 /* return; */
624 /* Check the reason-code of a unit check */
625 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
626 ccw_check_unit_check(p_ch, irb->ecw[0]);
628 /* State machine to bring the connection up, down and to restart */
629 p_ch->last_dstat = irb->scsw.cmd.dstat;
631 switch (p_ch->claw_state) {
632 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
633 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
634 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
635 (p_ch->irb->scsw.cmd.stctl ==
636 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
637 return;
638 wake_up(&p_ch->wait); /* wake up claw_release */
639 CLAW_DBF_TEXT(4, trace, "stop");
640 return;
641 case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
642 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
643 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
644 (p_ch->irb->scsw.cmd.stctl ==
645 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
646 CLAW_DBF_TEXT(4, trace, "haltio");
647 return;
649 if (p_ch->flag == CLAW_READ) {
650 p_ch->claw_state = CLAW_START_READ;
651 wake_up(&p_ch->wait); /* wake claw_open (READ)*/
652 } else if (p_ch->flag == CLAW_WRITE) {
653 p_ch->claw_state = CLAW_START_WRITE;
654 /* send SYSTEM_VALIDATE */
655 claw_strt_read(dev, LOCK_NO);
656 claw_send_control(dev,
657 SYSTEM_VALIDATE_REQUEST,
658 0, 0, 0,
659 p_env->host_name,
660 p_env->adapter_name);
661 } else {
662 printk(KERN_WARNING "claw: unsolicited "
663 "interrupt for device:"
664 "%s received c-%02x d-%02x\n",
665 dev_name(&cdev->dev),
666 irb->scsw.cmd.cstat,
667 irb->scsw.cmd.dstat);
668 return;
670 CLAW_DBF_TEXT(4, trace, "haltio");
671 return;
672 case CLAW_START_READ:
673 CLAW_DBF_TEXT(4, trace, "ReadIRQ");
674 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
675 clear_bit(0, (void *)&p_ch->IO_active);
676 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
677 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
678 (p_ch->irb->ecw[0]) == 0) {
679 privptr->stats.rx_errors++;
680 printk(KERN_INFO "%s: Restart is "
681 "required after remote "
682 "side recovers \n",
683 dev->name);
685 CLAW_DBF_TEXT(4, trace, "notrdy");
686 return;
688 if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
689 (p_ch->irb->scsw.cmd.dstat == 0)) {
690 if (test_and_set_bit(CLAW_BH_ACTIVE,
691 (void *)&p_ch->flag_a) == 0)
692 tasklet_schedule(&p_ch->tasklet);
693 else
694 CLAW_DBF_TEXT(4, trace, "PCINoBH");
695 CLAW_DBF_TEXT(4, trace, "PCI_read");
696 return;
698 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
699 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
700 (p_ch->irb->scsw.cmd.stctl ==
701 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
702 CLAW_DBF_TEXT(4, trace, "SPend_rd");
703 return;
705 clear_bit(0, (void *)&p_ch->IO_active);
706 claw_clearbit_busy(TB_RETRY, dev);
707 if (test_and_set_bit(CLAW_BH_ACTIVE,
708 (void *)&p_ch->flag_a) == 0)
709 tasklet_schedule(&p_ch->tasklet);
710 else
711 CLAW_DBF_TEXT(4, trace, "RdBHAct");
712 CLAW_DBF_TEXT(4, trace, "RdIRQXit");
713 return;
714 case CLAW_START_WRITE:
715 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
716 printk(KERN_INFO "%s: Unit Check Occurred in "
717 "write channel\n", dev->name);
718 clear_bit(0, (void *)&p_ch->IO_active);
719 if (p_ch->irb->ecw[0] & 0x80) {
720 printk(KERN_INFO "%s: Resetting Event "
721 "occurred:\n", dev->name);
722 init_timer(&p_ch->timer);
723 p_ch->timer.function =
724 (void *)claw_write_retry;
725 p_ch->timer.data = (unsigned long)p_ch;
726 p_ch->timer.expires = jiffies + 10*HZ;
727 add_timer(&p_ch->timer);
728 printk(KERN_INFO "%s: write connection "
729 "restarting\n", dev->name);
731 CLAW_DBF_TEXT(4, trace, "rstrtwrt");
732 return;
734 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
735 clear_bit(0, (void *)&p_ch->IO_active);
736 printk(KERN_INFO "%s: Unit Exception "
737 "Occured in write channel\n",
738 dev->name);
740 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
741 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
742 (p_ch->irb->scsw.cmd.stctl ==
743 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
744 CLAW_DBF_TEXT(4, trace, "writeUE");
745 return;
747 clear_bit(0, (void *)&p_ch->IO_active);
748 if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
749 claw_write_next(p_ch);
750 claw_clearbit_busy(TB_TX, dev);
751 claw_clear_busy(dev);
753 p_ch_r = (struct chbk *)&privptr->channel[READ];
754 if (test_and_set_bit(CLAW_BH_ACTIVE,
755 (void *)&p_ch_r->flag_a) == 0)
756 tasklet_schedule(&p_ch_r->tasklet);
757 CLAW_DBF_TEXT(4, trace, "StWtExit");
758 return;
759 default:
760 printk(KERN_WARNING "%s: wrong selection code - irq "
761 "state=%d\n", dev->name, p_ch->claw_state);
762 CLAW_DBF_TEXT(2, trace, "badIRQ");
763 return;
766 } /* end of claw_irq_handler */
769 /*-------------------------------------------------------------------*
770 * claw_irq_tasklet *
772 *--------------------------------------------------------------------*/
773 static void
774 claw_irq_tasklet ( unsigned long data )
776 struct chbk * p_ch;
777 struct net_device *dev;
778 struct claw_privbk * privptr;
780 p_ch = (struct chbk *) data;
781 dev = (struct net_device *)p_ch->ndev;
782 CLAW_DBF_TEXT(4, trace, "IRQtask");
783 privptr = (struct claw_privbk *)dev->ml_priv;
784 unpack_read(dev);
785 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
786 CLAW_DBF_TEXT(4, trace, "TskletXt");
787 return;
788 } /* end of claw_irq_bh */
790 /*-------------------------------------------------------------------*
791 * claw_release *
793 *--------------------------------------------------------------------*/
794 static int
795 claw_release(struct net_device *dev)
797 int rc;
798 int i;
799 unsigned long saveflags;
800 unsigned long parm;
801 struct claw_privbk *privptr;
802 DECLARE_WAITQUEUE(wait, current);
803 struct ccwbk* p_this_ccw;
804 struct ccwbk* p_buf;
806 if (!dev)
807 return 0;
808 privptr = (struct claw_privbk *)dev->ml_priv;
809 if (!privptr)
810 return 0;
811 CLAW_DBF_TEXT(4, trace, "release");
812 privptr->release_pend=1;
813 claw_setbit_busy(TB_STOP,dev);
814 for ( i = 1; i >=0 ; i--) {
815 spin_lock_irqsave(
816 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
817 /* del_timer(&privptr->channel[READ].timer); */
818 privptr->channel[i].claw_state = CLAW_STOP;
819 privptr->channel[i].IO_active = 0;
820 parm = (unsigned long) &privptr->channel[i];
821 if (i == WRITE)
822 claw_purge_skb_queue(
823 &privptr->channel[WRITE].collect_queue);
824 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
825 if (privptr->system_validate_comp==0x00) /* never opened? */
826 init_waitqueue_head(&privptr->channel[i].wait);
827 add_wait_queue(&privptr->channel[i].wait, &wait);
828 set_current_state(TASK_INTERRUPTIBLE);
829 spin_unlock_irqrestore(
830 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
831 schedule();
832 set_current_state(TASK_RUNNING);
833 remove_wait_queue(&privptr->channel[i].wait, &wait);
834 if (rc != 0) {
835 ccw_check_return_code(privptr->channel[i].cdev, rc);
838 if (privptr->pk_skb != NULL) {
839 dev_kfree_skb_any(privptr->pk_skb);
840 privptr->pk_skb = NULL;
842 if(privptr->buffs_alloc != 1) {
843 CLAW_DBF_TEXT(4, trace, "none2fre");
844 return 0;
846 CLAW_DBF_TEXT(4, trace, "freebufs");
847 if (privptr->p_buff_ccw != NULL) {
848 free_pages((unsigned long)privptr->p_buff_ccw,
849 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
851 CLAW_DBF_TEXT(4, trace, "freeread");
852 if (privptr->p_env->read_size < PAGE_SIZE) {
853 if (privptr->p_buff_read != NULL) {
854 free_pages((unsigned long)privptr->p_buff_read,
855 (int)pages_to_order_of_mag(privptr->p_buff_read_num));
858 else {
859 p_buf=privptr->p_read_active_first;
860 while (p_buf!=NULL) {
861 free_pages((unsigned long)p_buf->p_buffer,
862 (int)pages_to_order_of_mag(
863 privptr->p_buff_pages_perread ));
864 p_buf=p_buf->next;
867 CLAW_DBF_TEXT(4, trace, "freewrit");
868 if (privptr->p_env->write_size < PAGE_SIZE ) {
869 free_pages((unsigned long)privptr->p_buff_write,
870 (int)pages_to_order_of_mag(privptr->p_buff_write_num));
872 else {
873 p_buf=privptr->p_write_active_first;
874 while (p_buf!=NULL) {
875 free_pages((unsigned long)p_buf->p_buffer,
876 (int)pages_to_order_of_mag(
877 privptr->p_buff_pages_perwrite ));
878 p_buf=p_buf->next;
881 CLAW_DBF_TEXT(4, trace, "clearptr");
882 privptr->buffs_alloc = 0;
883 privptr->p_buff_ccw=NULL;
884 privptr->p_buff_read=NULL;
885 privptr->p_buff_write=NULL;
886 privptr->system_validate_comp=0;
887 privptr->release_pend=0;
888 /* Remove any writes that were pending and reset all reads */
889 p_this_ccw=privptr->p_read_active_first;
890 while (p_this_ccw!=NULL) {
891 p_this_ccw->header.length=0xffff;
892 p_this_ccw->header.opcode=0xff;
893 p_this_ccw->header.flag=0x00;
894 p_this_ccw=p_this_ccw->next;
897 while (privptr->p_write_active_first!=NULL) {
898 p_this_ccw=privptr->p_write_active_first;
899 p_this_ccw->header.flag=CLAW_PENDING;
900 privptr->p_write_active_first=p_this_ccw->next;
901 p_this_ccw->next=privptr->p_write_free_chain;
902 privptr->p_write_free_chain=p_this_ccw;
903 ++privptr->write_free_count;
905 privptr->p_write_active_last=NULL;
906 privptr->mtc_logical_link = -1;
907 privptr->mtc_skipping = 1;
908 privptr->mtc_offset=0;
910 if (((privptr->channel[READ].last_dstat |
911 privptr->channel[WRITE].last_dstat) &
912 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
913 printk(KERN_WARNING "%s: channel problems during close - "
914 "read: %02x - write: %02x\n",
915 dev->name,
916 privptr->channel[READ].last_dstat,
917 privptr->channel[WRITE].last_dstat);
918 CLAW_DBF_TEXT(2, trace, "badclose");
920 CLAW_DBF_TEXT(4, trace, "rlsexit");
921 return 0;
922 } /* end of claw_release */
924 /*-------------------------------------------------------------------*
925 * claw_write_retry *
927 *--------------------------------------------------------------------*/
929 static void
930 claw_write_retry ( struct chbk *p_ch )
933 struct net_device *dev=p_ch->ndev;
935 CLAW_DBF_TEXT(4, trace, "w_retry");
936 if (p_ch->claw_state == CLAW_STOP) {
937 return;
939 claw_strt_out_IO( dev );
940 CLAW_DBF_TEXT(4, trace, "rtry_xit");
941 return;
942 } /* end of claw_write_retry */
945 /*-------------------------------------------------------------------*
946 * claw_write_next *
948 *--------------------------------------------------------------------*/
950 static void
951 claw_write_next ( struct chbk * p_ch )
954 struct net_device *dev;
955 struct claw_privbk *privptr=NULL;
956 struct sk_buff *pk_skb;
957 int rc;
959 CLAW_DBF_TEXT(4, trace, "claw_wrt");
960 if (p_ch->claw_state == CLAW_STOP)
961 return;
962 dev = (struct net_device *) p_ch->ndev;
963 privptr = (struct claw_privbk *) dev->ml_priv;
964 claw_free_wrt_buf( dev );
965 if ((privptr->write_free_count > 0) &&
966 !skb_queue_empty(&p_ch->collect_queue)) {
967 pk_skb = claw_pack_skb(privptr);
968 while (pk_skb != NULL) {
969 rc = claw_hw_tx( pk_skb, dev,1);
970 if (privptr->write_free_count > 0) {
971 pk_skb = claw_pack_skb(privptr);
972 } else
973 pk_skb = NULL;
976 if (privptr->p_write_active_first!=NULL) {
977 claw_strt_out_IO(dev);
979 return;
980 } /* end of claw_write_next */
982 /*-------------------------------------------------------------------*
984 * claw_timer *
985 *--------------------------------------------------------------------*/
987 static void
988 claw_timer ( struct chbk * p_ch )
990 CLAW_DBF_TEXT(4, trace, "timer");
991 p_ch->flag |= CLAW_TIMER;
992 wake_up(&p_ch->wait);
993 return;
994 } /* end of claw_timer */
998 * functions
1002 /*-------------------------------------------------------------------*
1004 * pages_to_order_of_mag *
1006 * takes a number of pages from 1 to 512 and returns the *
1007 * base 2 order of magnitude (log2(num_pages)) that *
1008 * get_free_pages() needs; get_free_pages() has an upper order of 9 *
1009 *--------------------------------------------------------------------*/
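/*
 * Worked example (derived from the loop below): 1 page -> order 0,
 * 2 pages -> order 1, 5 pages -> order 3 (rounded up to 8 pages),
 * 512 or more pages -> order 9.
 */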
1011 static int
1012 pages_to_order_of_mag(int num_of_pages)
1014 int order_of_mag=1; /* assume 2 pages */
1015 int nump=2;
1017 CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
1018 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
1019 /* 512 pages = 2Meg on 4k page systems */
1020 if (num_of_pages >= 512) {return 9; }
1021 /* we have two or more pages order is at least 1 */
1022 for (nump=2 ;nump <= 512;nump*=2) {
1023 if (num_of_pages <= nump)
1024 break;
1025 order_of_mag +=1;
1027 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
1028 CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
1029 return order_of_mag;
1032 /*-------------------------------------------------------------------*
1034 * add_claw_reads *
1036 *--------------------------------------------------------------------*/
1037 static int
1038 add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1039 struct ccwbk* p_last)
1041 struct claw_privbk *privptr;
1042 struct ccw1 temp_ccw;
1043 struct endccw * p_end;
1044 CLAW_DBF_TEXT(4, trace, "addreads");
1045 privptr = dev->ml_priv;
1046 p_end = privptr->p_end_ccw;
1048 /* first CCW and last CCW contain a new set of read channel programs
1049 * to append to the running channel programs
1051 if ( p_first==NULL) {
1052 CLAW_DBF_TEXT(4, trace, "addexit");
1053 return 0;
1056 /* set up ending CCW sequence for this segment */
1057 if (p_end->read1) {
1058 p_end->read1=0x00; /* second ending CCW is now active */
1059 /* reset ending CCWs and setup TIC CCWs */
1060 p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1061 p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1062 p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
1063 p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
1064 p_end->read2_nop2.cda=0;
1065 p_end->read2_nop2.count=1;
1067 else {
1068 p_end->read1=0x01; /* first ending CCW is now active */
1069 /* reset ending CCWs and setup TIC CCWs */
1070 p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1071 p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1072 p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
1073 p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
1074 p_end->read1_nop2.cda=0;
1075 p_end->read1_nop2.count=1;
1078 if ( privptr-> p_read_active_first ==NULL ) {
1079 privptr-> p_read_active_first= p_first; /* set new first */
1080 privptr-> p_read_active_last = p_last; /* set new last */
1082 else {
1084 /* set up TIC ccw */
1085 temp_ccw.cda= (__u32)__pa(&p_first->read);
1086 temp_ccw.count=0;
1087 temp_ccw.flags=0;
1088 temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
1091 if (p_end->read1) {
1093 /* first set of CCW's is chained to the new read */
1094 /* chain, so the second set is chained to the active chain. */
1095 /* Therefore modify the second set to point to the new */
1096 /* read chain set up TIC CCWs */
1097 /* make sure we update the CCW so channel doesn't fetch it */
1098 /* when it's only half done */
1099 memcpy( &p_end->read2_nop2, &temp_ccw ,
1100 sizeof(struct ccw1));
1101 privptr->p_read_active_last->r_TIC_1.cda=
1102 (__u32)__pa(&p_first->read);
1103 privptr->p_read_active_last->r_TIC_2.cda=
1104 (__u32)__pa(&p_first->read);
1106 else {
1107 /* make sure we update the CCW so channel doesn't */
1108 /* fetch it when it is only half done */
1109 memcpy( &p_end->read1_nop2, &temp_ccw ,
1110 sizeof(struct ccw1));
1111 privptr->p_read_active_last->r_TIC_1.cda=
1112 (__u32)__pa(&p_first->read);
1113 privptr->p_read_active_last->r_TIC_2.cda=
1114 (__u32)__pa(&p_first->read);
1116 /* chain in new set of blocks */
1117 privptr->p_read_active_last->next = p_first;
1118 privptr->p_read_active_last=p_last;
1119 } /* end of if ( privptr-> p_read_active_first ==NULL) */
1120 CLAW_DBF_TEXT(4, trace, "addexit");
1121 return 0;
1122 } /* end of add_claw_reads */
1124 /*-------------------------------------------------------------------*
1125 * ccw_check_return_code *
1127 *-------------------------------------------------------------------*/
1129 static void
1130 ccw_check_return_code(struct ccw_device *cdev, int return_code)
1132 CLAW_DBF_TEXT(4, trace, "ccwret");
1133 if (return_code != 0) {
1134 switch (return_code) {
1135 case -EBUSY: /* BUSY is a transient state no action needed */
1136 break;
1137 case -ENODEV:
1138 printk(KERN_EMERG "%s: Missing device called "
1139 "for IO ENODEV\n", dev_name(&cdev->dev));
1140 break;
1141 case -EIO:
1142 printk(KERN_EMERG "%s: Status pending... EIO \n",
1143 dev_name(&cdev->dev));
1144 break;
1145 case -EINVAL:
1146 printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n",
1147 dev_name(&cdev->dev));
1148 break;
1149 default:
1150 printk(KERN_EMERG "%s: Unknown error in "
1151 "Do_IO %d\n", dev_name(&cdev->dev),
1152 return_code);
1155 CLAW_DBF_TEXT(4, trace, "ccwret");
1156 } /* end of ccw_check_return_code */
1158 /*-------------------------------------------------------------------*
1159 * ccw_check_unit_check *
1160 *--------------------------------------------------------------------*/
1162 static void
1163 ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1165 struct net_device *ndev = p_ch->ndev;
1167 CLAW_DBF_TEXT(4, trace, "unitchek");
1168 printk(KERN_INFO "%s: Unit Check with sense byte:0x%04x\n",
1169 ndev->name, sense);
1171 if (sense & 0x40) {
1172 if (sense & 0x01) {
1173 printk(KERN_WARNING "%s: Interface disconnect or "
1174 "Selective reset "
1175 "occurred (remote side)\n", ndev->name);
1177 else {
1178 printk(KERN_WARNING "%s: System reset occurred"
1179 " (remote side)\n", ndev->name);
1182 else if (sense & 0x20) {
1183 if (sense & 0x04) {
1184 printk(KERN_WARNING "%s: Data-streaming "
1185 "timeout)\n", ndev->name);
1187 else {
1188 printk(KERN_WARNING "%s: Data-transfer parity"
1189 " error\n", ndev->name);
1192 else if (sense & 0x10) {
1193 if (sense & 0x20) {
1194 printk(KERN_WARNING "%s: Hardware malfunction "
1195 "(remote side)\n", ndev->name);
1197 else {
1198 printk(KERN_WARNING "%s: read-data parity error "
1199 "(remote side)\n", ndev->name);
1203 } /* end of ccw_check_unit_check */
1205 /*-------------------------------------------------------------------*
1206 * find_link *
1207 *--------------------------------------------------------------------*/
1208 static int
1209 find_link(struct net_device *dev, char *host_name, char *ws_name )
1211 struct claw_privbk *privptr;
1212 struct claw_env *p_env;
1213 int rc=0;
1215 CLAW_DBF_TEXT(2, setup, "findlink");
1216 privptr = dev->ml_priv;
1217 p_env=privptr->p_env;
1218 switch (p_env->packing)
1220 case PACKING_ASK:
1221 if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
1222 (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
1223 rc = EINVAL;
1224 break;
1225 case DO_PACKED:
1226 case PACK_SEND:
1227 if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
1228 (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
1229 rc = EINVAL;
1230 break;
1231 default:
1232 if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
1233 (memcmp(p_env->api_type , ws_name, 8)!=0))
1234 rc = EINVAL;
1235 break;
1238 return rc;
1239 } /* end of find_link */
1241 /*-------------------------------------------------------------------*
1242 * claw_hw_tx *
1245 *-------------------------------------------------------------------*/
1247 static int
1248 claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1250 int rc=0;
1251 struct claw_privbk *privptr;
1252 struct ccwbk *p_this_ccw;
1253 struct ccwbk *p_first_ccw;
1254 struct ccwbk *p_last_ccw;
1255 __u32 numBuffers;
1256 signed long len_of_data;
1257 unsigned long bytesInThisBuffer;
1258 unsigned char *pDataAddress;
1259 struct endccw *pEnd;
1260 struct ccw1 tempCCW;
1261 struct chbk *p_ch;
1262 struct claw_env *p_env;
1263 int lock;
1264 struct clawph *pk_head;
1265 struct chbk *ch;
1267 CLAW_DBF_TEXT(4, trace, "hw_tx");
1268 privptr = (struct claw_privbk *)(dev->ml_priv);
1269 p_ch=(struct chbk *)&privptr->channel[WRITE];
1270 p_env =privptr->p_env;
1271 claw_free_wrt_buf(dev); /* Clean up free chain if possible */
1272 /* scan the write queue to free any completed write packets */
1273 p_first_ccw=NULL;
1274 p_last_ccw=NULL;
1275 if ((p_env->packing >= PACK_SEND) &&
1276 (skb->cb[1] != 'P')) {
1277 skb_push(skb,sizeof(struct clawph));
1278 pk_head=(struct clawph *)skb->data;
1279 pk_head->len=skb->len-sizeof(struct clawph);
1280 if (pk_head->len%4) {
1281 pk_head->len+= 4-(pk_head->len%4);
1282 skb_pad(skb,4-(pk_head->len%4));
1283 skb_put(skb,4-(pk_head->len%4));
1285 if (p_env->packing == DO_PACKED)
1286 pk_head->link_num = linkid;
1287 else
1288 pk_head->link_num = 0;
1289 pk_head->flag = 0x00;
1290 skb_pad(skb,4);
1291 skb->cb[1] = 'P';
1293 if (linkid == 0) {
1294 if (claw_check_busy(dev)) {
1295 if (privptr->write_free_count!=0) {
1296 claw_clear_busy(dev);
1298 else {
1299 claw_strt_out_IO(dev );
1300 claw_free_wrt_buf( dev );
1301 if (privptr->write_free_count==0) {
1302 ch = &privptr->channel[WRITE];
1303 atomic_inc(&skb->users);
1304 skb_queue_tail(&ch->collect_queue, skb);
1305 goto Done;
1307 else {
1308 claw_clear_busy(dev);
1312 /* tx lock */
1313 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1314 ch = &privptr->channel[WRITE];
1315 atomic_inc(&skb->users);
1316 skb_queue_tail(&ch->collect_queue, skb);
1317 claw_strt_out_IO(dev );
1318 rc=-EBUSY;
1319 goto Done2;
1322 /* See how many write buffers are required to hold this data */
1323 numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
1325 /* If that number of buffers isn't available, give up for now */
1326 if (privptr->write_free_count < numBuffers ||
1327 privptr->p_write_free_chain == NULL ) {
1329 claw_setbit_busy(TB_NOBUFFER,dev);
1330 ch = &privptr->channel[WRITE];
1331 atomic_inc(&skb->users);
1332 skb_queue_tail(&ch->collect_queue, skb);
1333 CLAW_DBF_TEXT(2, trace, "clawbusy");
1334 goto Done2;
1336 pDataAddress=skb->data;
1337 len_of_data=skb->len;
1339 while (len_of_data > 0) {
1340 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1341 if (p_this_ccw == NULL) { /* lost the race */
1342 ch = &privptr->channel[WRITE];
1343 atomic_inc(&skb->users);
1344 skb_queue_tail(&ch->collect_queue, skb);
1345 goto Done2;
1347 privptr->p_write_free_chain=p_this_ccw->next;
1348 p_this_ccw->next=NULL;
1349 --privptr->write_free_count; /* -1 */
1350 bytesInThisBuffer=len_of_data;
1351 memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
1352 len_of_data-=bytesInThisBuffer;
1353 pDataAddress+=(unsigned long)bytesInThisBuffer;
1354 /* setup write CCW */
1355 p_this_ccw->write.cmd_code = (linkid * 8) +1;
1356 if (len_of_data>0) {
1357 p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
1359 p_this_ccw->write.count=bytesInThisBuffer;
1360 /* now add to end of this chain */
1361 if (p_first_ccw==NULL) {
1362 p_first_ccw=p_this_ccw;
1364 if (p_last_ccw!=NULL) {
1365 p_last_ccw->next=p_this_ccw;
1366 /* set up TIC ccws */
1367 p_last_ccw->w_TIC_1.cda=
1368 (__u32)__pa(&p_this_ccw->write);
1370 p_last_ccw=p_this_ccw; /* save new last block */
1373 /* FirstCCW and LastCCW now contain a new set of write channel
1374 * programs to append to the running channel program
1377 if (p_first_ccw!=NULL) {
1378 /* setup ending ccw sequence for this segment */
1379 pEnd=privptr->p_end_ccw;
1380 if (pEnd->write1) {
1381 pEnd->write1=0x00; /* second end ccw is now active */
1382 /* set up Tic CCWs */
1383 p_last_ccw->w_TIC_1.cda=
1384 (__u32)__pa(&pEnd->write2_nop1);
1385 pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1386 pEnd->write2_nop2.flags =
1387 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1388 pEnd->write2_nop2.cda=0;
1389 pEnd->write2_nop2.count=1;
1391 else { /* end of if (pEnd->write1)*/
1392 pEnd->write1=0x01; /* first end ccw is now active */
1393 /* set up Tic CCWs */
1394 p_last_ccw->w_TIC_1.cda=
1395 (__u32)__pa(&pEnd->write1_nop1);
1396 pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1397 pEnd->write1_nop2.flags =
1398 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1399 pEnd->write1_nop2.cda=0;
1400 pEnd->write1_nop2.count=1;
1401 } /* end if if (pEnd->write1) */
1403 if (privptr->p_write_active_first==NULL ) {
1404 privptr->p_write_active_first=p_first_ccw;
1405 privptr->p_write_active_last=p_last_ccw;
1407 else {
1408 /* set up Tic CCWs */
1410 tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
1411 tempCCW.count=0;
1412 tempCCW.flags=0;
1413 tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
1415 if (pEnd->write1) {
1418 * first set of ending CCW's is chained to the new write
1419 * chain, so the second set is chained to the active chain
1420 * Therefore modify the second set to point to the new write chain.
1421 * make sure we update the CCW atomically
1422 * so channel does not fetch it when it's only half done
1424 memcpy( &pEnd->write2_nop2, &tempCCW ,
1425 sizeof(struct ccw1));
1426 privptr->p_write_active_last->w_TIC_1.cda=
1427 (__u32)__pa(&p_first_ccw->write);
1429 else {
1431 /*make sure we update the CCW atomically
1432 *so channel does not fetch it when it's only half done
1434 memcpy(&pEnd->write1_nop2, &tempCCW ,
1435 sizeof(struct ccw1));
1436 privptr->p_write_active_last->w_TIC_1.cda=
1437 (__u32)__pa(&p_first_ccw->write);
1439 } /* end if if (pEnd->write1) */
1441 privptr->p_write_active_last->next=p_first_ccw;
1442 privptr->p_write_active_last=p_last_ccw;
1445 } /* endif (p_first_ccw!=NULL) */
1446 dev_kfree_skb_any(skb);
1447 if (linkid==0) {
1448 lock=LOCK_NO;
1450 else {
1451 lock=LOCK_YES;
1453 claw_strt_out_IO(dev );
1454 /* if write free count is zero , set NOBUFFER */
1455 if (privptr->write_free_count==0) {
1456 claw_setbit_busy(TB_NOBUFFER,dev);
1458 Done2:
1459 claw_clearbit_busy(TB_TX,dev);
1460 Done:
1461 return(rc);
1462 } /* end of claw_hw_tx */
1464 /*-------------------------------------------------------------------*
1466 * init_ccw_bk *
1468 *--------------------------------------------------------------------*/
1470 static int
1471 init_ccw_bk(struct net_device *dev)
1474 __u32 ccw_blocks_required;
1475 __u32 ccw_blocks_perpage;
1476 __u32 ccw_pages_required;
1477 __u32 claw_reads_perpage=1;
1478 __u32 claw_read_pages;
1479 __u32 claw_writes_perpage=1;
1480 __u32 claw_write_pages;
1481 void *p_buff=NULL;
1482 struct ccwbk*p_free_chain;
1483 struct ccwbk*p_buf;
1484 struct ccwbk*p_last_CCWB;
1485 struct ccwbk*p_first_CCWB;
1486 struct endccw *p_endccw=NULL;
1487 addr_t real_address;
1488 struct claw_privbk *privptr = dev->ml_priv;
1489 struct clawh *pClawH=NULL;
1490 addr_t real_TIC_address;
1491 int i,j;
1492 CLAW_DBF_TEXT(4, trace, "init_ccw");
1494 /* initialize statistics field */
1495 privptr->active_link_ID=0;
1496 /* initialize ccwbk pointers */
1497 privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
1498 privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
1499 privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
1500 privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
1501 privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
1502 privptr->p_end_ccw=NULL; /* pointer to ending ccw */
1503 privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
1504 privptr->buffs_alloc = 0;
1505 memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
1506 memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
1507 /* initialize free write ccwbk counter */
1508 privptr->write_free_count=0; /* number of free bufs on write chain */
1509 p_last_CCWB = NULL;
1510 p_first_CCWB= NULL;
1512 * We need 1 CCW block for each read buffer, 1 for each
1513 * write buffer, plus 1 for ClawSignalBlock
1515 ccw_blocks_required =
1516 privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
1518 * compute number of CCW blocks that will fit in a page
1520 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
1521 ccw_pages_required=
1522 DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
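/*
 * Example of the computation above (CCWBK_SIZE is defined in claw.h):
 * with the default 5 read and 5 write buffers, ccw_blocks_required is
 * 5 + 5 + 1 = 11, and ccw_pages_required is
 * DIV_ROUND_UP(11, PAGE_SIZE / CCWBK_SIZE) -- a single page on 4k-page
 * systems as long as at least 11 CCW blocks fit in one page.
 */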
1525 * read and write sizes are set by 2 constants in claw.h,
1526 * 4k and 32k. Unpacked values other than 4k are not going to
1527 * provide good performance. With packing support, 32k
1528 * buffers are used.
1530 if (privptr->p_env->read_size < PAGE_SIZE) {
1531 claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
1532 claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
1533 claw_reads_perpage);
1535 else { /* > or equal */
1536 privptr->p_buff_pages_perread =
1537 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1538 claw_read_pages = privptr->p_env->read_buffers *
1539 privptr->p_buff_pages_perread;
1541 if (privptr->p_env->write_size < PAGE_SIZE) {
1542 claw_writes_perpage =
1543 PAGE_SIZE / privptr->p_env->write_size;
1544 claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
1545 claw_writes_perpage);
1548 else { /* > or equal */
1549 privptr->p_buff_pages_perwrite =
1550 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1551 claw_write_pages = privptr->p_env->write_buffers *
1552 privptr->p_buff_pages_perwrite;
1555 * allocate ccw_pages_required
1557 if (privptr->p_buff_ccw==NULL) {
1558 privptr->p_buff_ccw=
1559 (void *)__get_free_pages(__GFP_DMA,
1560 (int)pages_to_order_of_mag(ccw_pages_required ));
1561 if (privptr->p_buff_ccw==NULL) {
1562 return -ENOMEM;
1564 privptr->p_buff_ccw_num=ccw_pages_required;
1566 memset(privptr->p_buff_ccw, 0x00,
1567 privptr->p_buff_ccw_num * PAGE_SIZE);
1570 * obtain ending ccw block address
1573 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
1574 real_address = (__u32)__pa(privptr->p_end_ccw);
1575 /* Initialize ending CCW block */
1576 p_endccw=privptr->p_end_ccw;
1577 p_endccw->real=real_address;
1578 p_endccw->write1=0x00;
1579 p_endccw->read1=0x00;
1581 /* write1_nop1 */
1582 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1583 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1584 p_endccw->write1_nop1.count = 1;
1585 p_endccw->write1_nop1.cda = 0;
1587 /* write1_nop2 */
1588 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1589 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1590 p_endccw->write1_nop2.count = 1;
1591 p_endccw->write1_nop2.cda = 0;
1593 /* write2_nop1 */
1594 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1595 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1596 p_endccw->write2_nop1.count = 1;
1597 p_endccw->write2_nop1.cda = 0;
1599 /* write2_nop2 */
1600 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1601 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1602 p_endccw->write2_nop2.count = 1;
1603 p_endccw->write2_nop2.cda = 0;
1605 /* read1_nop1 */
1606 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1607 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1608 p_endccw->read1_nop1.count = 1;
1609 p_endccw->read1_nop1.cda = 0;
1611 /* read1_nop2 */
1612 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1613 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1614 p_endccw->read1_nop2.count = 1;
1615 p_endccw->read1_nop2.cda = 0;
1617 /* read2_nop1 */
1618 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1619 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1620 p_endccw->read2_nop1.count = 1;
1621 p_endccw->read2_nop1.cda = 0;
1623 /* read2_nop2 */
1624 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1625 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1626 p_endccw->read2_nop2.count = 1;
1627 p_endccw->read2_nop2.cda = 0;
1630 * Build a chain of CCWs
1633 p_buff=privptr->p_buff_ccw;
1635 p_free_chain=NULL;
1636 for (i=0 ; i < ccw_pages_required; i++ ) {
1637 real_address = (__u32)__pa(p_buff);
1638 p_buf=p_buff;
1639 for (j=0 ; j < ccw_blocks_perpage ; j++) {
1640 p_buf->next = p_free_chain;
1641 p_free_chain = p_buf;
1642 p_buf->real=(__u32)__pa(p_buf);
1643 ++p_buf;
1645 p_buff+=PAGE_SIZE;
1648 * Initialize ClawSignalBlock
1651 if (privptr->p_claw_signal_blk==NULL) {
1652 privptr->p_claw_signal_blk=p_free_chain;
1653 p_free_chain=p_free_chain->next;
1654 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
1655 pClawH->length=0xffff;
1656 pClawH->opcode=0xff;
1657 pClawH->flag=CLAW_BUSY;
1661 * allocate write_pages_required and add to free chain
1663 if (privptr->p_buff_write==NULL) {
1664 if (privptr->p_env->write_size < PAGE_SIZE) {
1665 privptr->p_buff_write=
1666 (void *)__get_free_pages(__GFP_DMA,
1667 (int)pages_to_order_of_mag(claw_write_pages ));
1668 if (privptr->p_buff_write==NULL) {
1669 privptr->p_buff_ccw=NULL;
1670 return -ENOMEM;
1673 * Build CLAW write free chain
1677 memset(privptr->p_buff_write, 0x00,
1678 ccw_pages_required * PAGE_SIZE);
1679 privptr->p_write_free_chain=NULL;
1681 p_buff=privptr->p_buff_write;
1683 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
1684 p_buf = p_free_chain; /* get a CCW */
1685 p_free_chain = p_buf->next;
1686 p_buf->next =privptr->p_write_free_chain;
1687 privptr->p_write_free_chain = p_buf;
1688 p_buf-> p_buffer = (struct clawbuf *)p_buff;
1689 p_buf-> write.cda = (__u32)__pa(p_buff);
1690 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1691 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1692 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1693 p_buf-> w_read_FF.count = 1;
1694 p_buf-> w_read_FF.cda =
1695 (__u32)__pa(&p_buf-> header.flag);
1696 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1697 p_buf-> w_TIC_1.flags = 0;
1698 p_buf-> w_TIC_1.count = 0;
1700 if (((unsigned long)p_buff+privptr->p_env->write_size) >=
1701 ((unsigned long)(p_buff+2*
1702 (privptr->p_env->write_size) -1) & PAGE_MASK)) {
1703 p_buff= p_buff+privptr->p_env->write_size;
1707 else /* Buffers are >= PAGE_SIZE. 1 buff per get_free_pages */
1709 privptr->p_write_free_chain=NULL;
1710 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
1711 p_buff=(void *)__get_free_pages(__GFP_DMA,
1712 (int)pages_to_order_of_mag(
1713 privptr->p_buff_pages_perwrite) );
1714 if (p_buff==NULL) {
1715 free_pages((unsigned long)privptr->p_buff_ccw,
1716 (int)pages_to_order_of_mag(
1717 privptr->p_buff_ccw_num));
1718 privptr->p_buff_ccw=NULL;
1719 p_buf=privptr->p_buff_write;
1720 while (p_buf!=NULL) {
1721 free_pages((unsigned long)
1722 p_buf->p_buffer,
1723 (int)pages_to_order_of_mag(
1724 privptr->p_buff_pages_perwrite));
1725 p_buf=p_buf->next;
1727 return -ENOMEM;
1728 } /* Error on get_pages */
1729 memset(p_buff, 0x00, privptr->p_env->write_size );
1730 p_buf = p_free_chain;
1731 p_free_chain = p_buf->next;
1732 p_buf->next = privptr->p_write_free_chain;
1733 privptr->p_write_free_chain = p_buf;
1734 privptr->p_buff_write = p_buf;
1735 p_buf->p_buffer=(struct clawbuf *)p_buff;
1736 p_buf-> write.cda = (__u32)__pa(p_buff);
1737 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1738 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1739 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1740 p_buf-> w_read_FF.count = 1;
1741 p_buf-> w_read_FF.cda =
1742 (__u32)__pa(&p_buf-> header.flag);
1743 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1744 p_buf-> w_TIC_1.flags = 0;
1745 p_buf-> w_TIC_1.count = 0;
1746 } /* for all write_buffers */
1748 } /* else buffers are PAGE_SIZE or bigger */
1751 privptr->p_buff_write_num=claw_write_pages;
1752 privptr->write_free_count=privptr->p_env->write_buffers;
1756 * allocate read_pages_required and chain to free chain
1758 if (privptr->p_buff_read==NULL) {
1759 if (privptr->p_env->read_size < PAGE_SIZE) {
1760 privptr->p_buff_read=
1761 (void *)__get_free_pages(__GFP_DMA,
1762 (int)pages_to_order_of_mag(claw_read_pages) );
1763 if (privptr->p_buff_read==NULL) {
1764 free_pages((unsigned long)privptr->p_buff_ccw,
1765 (int)pages_to_order_of_mag(
1766 privptr->p_buff_ccw_num));
1767 /* free the write pages size is < page size */
1768 free_pages((unsigned long)privptr->p_buff_write,
1769 (int)pages_to_order_of_mag(
1770 privptr->p_buff_write_num));
1771 privptr->p_buff_ccw=NULL;
1772 privptr->p_buff_write=NULL;
1773 return -ENOMEM;
1775 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
1776 privptr->p_buff_read_num=claw_read_pages;
1778 * Build CLAW read free chain
1781 p_buff=privptr->p_buff_read;
1782 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1783 p_buf = p_free_chain;
1784 p_free_chain = p_buf->next;
1786 if (p_last_CCWB==NULL) {
1787 p_buf->next=NULL;
1788 real_TIC_address=0;
1789 p_last_CCWB=p_buf;
1791 else {
1792 p_buf->next=p_first_CCWB;
1793 real_TIC_address=
1794 (__u32)__pa(&p_first_CCWB -> read );
1797 p_first_CCWB=p_buf;
1799 p_buf->p_buffer=(struct clawbuf *)p_buff;
1800 /* initialize read command */
1801 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1802 p_buf-> read.cda = (__u32)__pa(p_buff);
1803 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1804 p_buf-> read.count = privptr->p_env->read_size;
1806 /* initialize read_h command */
1807 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1808 p_buf-> read_h.cda =
1809 (__u32)__pa(&(p_buf->header));
1810 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1811 p_buf-> read_h.count = sizeof(struct clawh);
1813 /* initialize Signal command */
1814 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1815 p_buf-> signal.cda =
1816 (__u32)__pa(&(pClawH->flag));
1817 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1818 p_buf-> signal.count = 1;
1820 /* initialize r_TIC_1 command */
1821 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1822 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1823 p_buf-> r_TIC_1.flags = 0;
1824 p_buf-> r_TIC_1.count = 0;
1826 /* initialize r_read_FF command */
1827 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1828 p_buf-> r_read_FF.cda =
1829 (__u32)__pa(&(pClawH->flag));
1830 p_buf-> r_read_FF.flags =
1831 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1832 p_buf-> r_read_FF.count = 1;
1834 /* initialize r_TIC_2 */
1835 memcpy(&p_buf->r_TIC_2,
1836 &p_buf->r_TIC_1, sizeof(struct ccw1));
1838 /* initialize Header */
1839 p_buf->header.length=0xffff;
1840 p_buf->header.opcode=0xff;
1841 p_buf->header.flag=CLAW_PENDING;
1843 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
1844 ((unsigned long)(p_buff+2*(privptr->p_env->read_size) -1)
1845 & PAGE_MASK) ) {
1846 p_buff= p_buff+privptr->p_env->read_size;
1848 else {
1849 p_buff=
1850 (void *)((unsigned long)
1851 (p_buff+2*(privptr->p_env->read_size) -1)
1852 & PAGE_MASK) ;
1854 } /* for read_buffers */
1855 } /* read_size < PAGE_SIZE */
1856 else { /* read Size >= PAGE_SIZE */
1857 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1858 p_buff = (void *)__get_free_pages(__GFP_DMA,
1859 (int)pages_to_order_of_mag(privptr->p_buff_pages_perread) );
1860 if (p_buff==NULL) {
1861 free_pages((unsigned long)privptr->p_buff_ccw,
1862 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
1863 /* free the write pages */
1864 p_buf=privptr->p_buff_write;
1865 while (p_buf!=NULL) {
1866 free_pages((unsigned long)p_buf->p_buffer,
1867 (int)pages_to_order_of_mag(
1868 privptr->p_buff_pages_perwrite ));
1869 p_buf=p_buf->next;
1871 /* free any read pages already alloc */
1872 p_buf=privptr->p_buff_read;
1873 while (p_buf!=NULL) {
1874 free_pages((unsigned long)p_buf->p_buffer,
1875 (int)pages_to_order_of_mag(
1876 privptr->p_buff_pages_perread ));
1877 p_buf=p_buf->next;
1879 privptr->p_buff_ccw=NULL;
1880 privptr->p_buff_write=NULL;
1881 return -ENOMEM;
1883 memset(p_buff, 0x00, privptr->p_env->read_size);
1884 p_buf = p_free_chain;
1885 privptr->p_buff_read = p_buf;
1886 p_free_chain = p_buf->next;
1888 if (p_last_CCWB==NULL) {
1889 p_buf->next=NULL;
1890 real_TIC_address=0;
1891 p_last_CCWB=p_buf;
1893 else {
1894 p_buf->next=p_first_CCWB;
1895 real_TIC_address=
1896 (addr_t)__pa(
1897 &p_first_CCWB -> read );
1900 p_first_CCWB=p_buf;
1901 /* save buff address */
1902 p_buf->p_buffer=(struct clawbuf *)p_buff;
1903 /* initialize read command */
1904 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1905 p_buf-> read.cda = (__u32)__pa(p_buff);
1906 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1907 p_buf-> read.count = privptr->p_env->read_size;
1909 /* initialize read_h command */
1910 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1911 p_buf-> read_h.cda =
1912 (__u32)__pa(&(p_buf->header));
1913 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1914 p_buf-> read_h.count = sizeof(struct clawh);
1916 /* initialize Signal command */
1917 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1918 p_buf-> signal.cda =
1919 (__u32)__pa(&(pClawH->flag));
1920 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1921 p_buf-> signal.count = 1;
1923 /* initialize r_TIC_1 command */
1924 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1925 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1926 p_buf-> r_TIC_1.flags = 0;
1927 p_buf-> r_TIC_1.count = 0;
1929 /* initialize r_read_FF command */
1930 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1931 p_buf-> r_read_FF.cda =
1932 (__u32)__pa(&(pClawH->flag));
1933 p_buf-> r_read_FF.flags =
1934 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1935 p_buf-> r_read_FF.count = 1;
1937 /* initialize r_TIC_2 */
1938 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
1939 sizeof(struct ccw1));
1941 /* initialize Header */
1942 p_buf->header.length=0xffff;
1943 p_buf->header.opcode=0xff;
1944 p_buf->header.flag=CLAW_PENDING;
1946 } /* For read_buffers */
1947 } /* read_size >= PAGE_SIZE */
1948 } /* pBuffread = NULL */
1949 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
1950 privptr->buffs_alloc = 1;
1952 return 0;
1953 } /* end of init_ccw_bk */
1955 /*-------------------------------------------------------------------*
1957 * probe_error *
1959 *--------------------------------------------------------------------*/
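/*
 * Clean up after a failed probe: drop the driver_data pointer and free
 * the private control block together with its environment and MTC
 * envelope buffers, if they were allocated.
 */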
1961 static void
1962 probe_error( struct ccwgroup_device *cgdev)
1964 struct claw_privbk *privptr;
1966 CLAW_DBF_TEXT(4, trace, "proberr");
1967 privptr = (struct claw_privbk *) cgdev->dev.driver_data;
1968 if (privptr != NULL) {
1969 cgdev->dev.driver_data = NULL;
1970 kfree(privptr->p_env);
1971 kfree(privptr->p_mtc_envelope);
1972 kfree(privptr);
1974 } /* probe_error */
1976 /*-------------------------------------------------------------------*
1977 * claw_process_control *
1980 *--------------------------------------------------------------------*/
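/*
 * Handle an inbound CLAW control packet.  The control record is copied
 * out of the receive buffer (from offset 4 when the link is running
 * packed) and dispatched on its command code:
 *  - SYSTEM_VALIDATE_REQUEST/RESPONSE: check version, host/WS names and
 *    frame sizes, answer the peer and start the connection request.
 *  - CONNECTION_REQUEST/RESPONSE/CONFIRM: negotiate the logical link id,
 *    wake up claw_open() on the write channel and, for the packed API,
 *    switch the link to packing.
 *  - DISCONNECT: clear (or, while packing is being negotiated, adjust)
 *    the active link id.
 *  - CLAW_ERROR: log only.
 * Returns 0 except for a connection response carrying a bad rc.
 */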
1982 static int
1983 claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
1986 struct clawbuf *p_buf;
1987 struct clawctl ctlbk;
1988 struct clawctl *p_ctlbk;
1989 char temp_host_name[8];
1990 char temp_ws_name[8];
1991 struct claw_privbk *privptr;
1992 struct claw_env *p_env;
1993 struct sysval *p_sysval;
1994 struct conncmd *p_connect=NULL;
1995 int rc;
1996 struct chbk *p_ch = NULL;
1997 struct device *tdev;
1998 CLAW_DBF_TEXT(2, setup, "clw_cntl");
1999 udelay(1000); /* Wait a ms for the control packets to
2000 *catch up to each other */
2001 privptr = dev->ml_priv;
2002 p_env=privptr->p_env;
2003 tdev = &privptr->channel[READ].cdev->dev;
2004 memcpy( &temp_host_name, p_env->host_name, 8);
2005 memcpy( &temp_ws_name, p_env->adapter_name , 8);
2006 printk(KERN_INFO "%s: CLAW device %.8s: "
2007 "Received Control Packet\n",
2008 dev->name, temp_ws_name);
2009 if (privptr->release_pend==1) {
2010 return 0;
2012 p_buf=p_ccw->p_buffer;
2013 p_ctlbk=&ctlbk;
2014 if (p_env->packing == DO_PACKED) { /* packing in progress?*/
2015 memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
2016 } else {
2017 memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
2019 switch (p_ctlbk->command)
2021 case SYSTEM_VALIDATE_REQUEST:
2022 if (p_ctlbk->version != CLAW_VERSION_ID) {
2023 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2024 CLAW_RC_WRONG_VERSION);
2025 printk("%s: %d is wrong version id. "
2026 "Expected %d\n",
2027 dev->name, p_ctlbk->version,
2028 CLAW_VERSION_ID);
2030 p_sysval = (struct sysval *)&(p_ctlbk->data);
2031 printk("%s: Recv Sys Validate Request: "
2032 "Vers=%d,link_id=%d,Corr=%d,WS name=%."
2033 "8s,Host name=%.8s\n",
2034 dev->name, p_ctlbk->version,
2035 p_ctlbk->linkid,
2036 p_ctlbk->correlator,
2037 p_sysval->WS_name,
2038 p_sysval->host_name);
2039 if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
2040 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2041 CLAW_RC_NAME_MISMATCH);
2042 CLAW_DBF_TEXT(2, setup, "HSTBAD");
2043 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
2044 CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
2045 printk(KERN_INFO "%s: Host name mismatch\n",
2046 dev->name);
2047 printk(KERN_INFO "%s: Received :%s: "
2048 "expected :%s: \n",
2049 dev->name,
2050 p_sysval->host_name,
2051 temp_host_name);
2053 if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
2054 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2055 CLAW_RC_NAME_MISMATCH);
2056 CLAW_DBF_TEXT(2, setup, "WSNBAD");
2057 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
2058 CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
2059 printk(KERN_INFO "%s: WS name mismatch\n",
2060 dev->name);
2061 printk(KERN_INFO "%s: Received :%s: "
2062 "expected :%s: \n",
2063 dev->name,
2064 p_sysval->WS_name,
2065 temp_ws_name);
2067 if ((p_sysval->write_frame_size < p_env->write_size) &&
2068 (p_env->packing == 0)) {
2069 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2070 CLAW_RC_HOST_RCV_TOO_SMALL);
2071 printk(KERN_INFO "%s: host write size is too "
2072 "small\n", dev->name);
2073 CLAW_DBF_TEXT(2, setup, "wrtszbad");
2075 if ((p_sysval->read_frame_size < p_env->read_size) &&
2076 (p_env->packing == 0)) {
2077 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2078 CLAW_RC_HOST_RCV_TOO_SMALL);
2079 printk(KERN_INFO "%s: host read size is too "
2080 "small\n", dev->name);
2081 CLAW_DBF_TEXT(2, setup, "rdsizbad");
2083 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
2084 printk(KERN_INFO "%s: CLAW device %.8s: System validate "
2085 "completed.\n", dev->name, temp_ws_name);
2086 printk("%s: sys Validate Rsize:%d Wsize:%d\n", dev->name,
2087 p_sysval->read_frame_size, p_sysval->write_frame_size);
2088 privptr->system_validate_comp = 1;
2089 if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
2090 p_env->packing = PACKING_ASK;
2091 claw_strt_conn_req(dev);
2092 break;
2093 case SYSTEM_VALIDATE_RESPONSE:
2094 p_sysval = (struct sysval *)&(p_ctlbk->data);
2095 printk("%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d,"
2096 "WS name=%.8s,Host name=%.8s\n",
2097 dev->name,
2098 p_ctlbk->version,
2099 p_ctlbk->correlator,
2100 p_ctlbk->rc,
2101 p_sysval->WS_name,
2102 p_sysval->host_name);
2103 switch (p_ctlbk->rc) {
2104 case 0:
2105 printk(KERN_INFO "%s: CLAW device "
2106 "%.8s: System validate "
2107 "completed.\n",
2108 dev->name, temp_ws_name);
2109 if (privptr->system_validate_comp == 0)
2110 claw_strt_conn_req(dev);
2111 privptr->system_validate_comp = 1;
2112 break;
2113 case CLAW_RC_NAME_MISMATCH:
2114 printk(KERN_INFO "%s: Sys Validate "
2115 "Resp : Host, WS name is "
2116 "mismatch\n",
2117 dev->name);
2118 break;
2119 case CLAW_RC_WRONG_VERSION:
2120 printk(KERN_INFO "%s: Sys Validate "
2121 "Resp : Wrong version\n",
2122 dev->name);
2123 break;
2124 case CLAW_RC_HOST_RCV_TOO_SMALL:
2125 printk(KERN_INFO "%s: Sys Validate "
2126 "Resp : bad frame size\n",
2127 dev->name);
2128 break;
2129 default:
2130 printk(KERN_INFO "%s: Sys Validate "
2131 "error code=%d \n",
2132 dev->name, p_ctlbk->rc);
2133 break;
2135 break;
2137 case CONNECTION_REQUEST:
2138 p_connect = (struct conncmd *)&(p_ctlbk->data);
2139 printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2140 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2141 dev->name,
2142 p_ctlbk->version,
2143 p_ctlbk->linkid,
2144 p_ctlbk->correlator,
2145 p_connect->host_name,
2146 p_connect->WS_name);
2147 if (privptr->active_link_ID != 0) {
2148 claw_snd_disc(dev, p_ctlbk);
2149 printk(KERN_INFO "%s: Conn Req error : "
2150 "already logical link is active \n",
2151 dev->name);
2153 if (p_ctlbk->linkid != 1) {
2154 claw_snd_disc(dev, p_ctlbk);
2155 printk(KERN_INFO "%s: Conn Req error : "
2156 "req logical link id is not 1\n",
2157 dev->name);
2159 rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
2160 if (rc != 0) {
2161 claw_snd_disc(dev, p_ctlbk);
2162 printk(KERN_INFO "%s: Conn Req error : "
2163 "requested appl name does not match\n",
2164 dev->name);
2166 claw_send_control(dev,
2167 CONNECTION_CONFIRM, p_ctlbk->linkid,
2168 p_ctlbk->correlator,
2169 0, p_connect->host_name,
2170 p_connect->WS_name);
2171 if (p_env->packing == PACKING_ASK) {
2172 p_env->packing = PACK_SEND;
2173 claw_snd_conn_req(dev, 0);
2175 printk(KERN_INFO "%s: CLAW device %.8s: Connection "
2176 "completed link_id=%d.\n",
2177 dev->name, temp_ws_name,
2178 p_ctlbk->linkid);
2179 privptr->active_link_ID = p_ctlbk->linkid;
2180 p_ch = &privptr->channel[WRITE];
2181 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2182 break;
2183 case CONNECTION_RESPONSE:
2184 p_connect = (struct conncmd *)&(p_ctlbk->data);
2185 printk(KERN_INFO "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2186 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
2187 dev->name,
2188 p_ctlbk->version,
2189 p_ctlbk->linkid,
2190 p_ctlbk->correlator,
2191 p_ctlbk->rc,
2192 p_connect->host_name,
2193 p_connect->WS_name);
2195 if (p_ctlbk->rc != 0) {
2196 printk(KERN_INFO "%s: Conn Resp error: rc=%d \n",
2197 dev->name, p_ctlbk->rc);
2198 return 1;
2200 rc = find_link(dev,
2201 p_connect->host_name, p_connect->WS_name);
2202 if (rc != 0) {
2203 claw_snd_disc(dev, p_ctlbk);
2204 printk(KERN_INFO "%s: Conn Resp error: "
2205 "req appl name does not match\n",
2206 dev->name);
2208 /* should be until CONNECTION_CONFIRM */
2209 privptr->active_link_ID = -(p_ctlbk->linkid);
2210 break;
2211 case CONNECTION_CONFIRM:
2212 p_connect = (struct conncmd *)&(p_ctlbk->data);
2213 printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
2214 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
2215 dev->name,
2216 p_ctlbk->version,
2217 p_ctlbk->linkid,
2218 p_ctlbk->correlator,
2219 p_connect->host_name,
2220 p_connect->WS_name);
2221 if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
2222 privptr->active_link_ID = p_ctlbk->linkid;
2223 if (p_env->packing > PACKING_ASK) {
2224 printk(KERN_INFO "%s: Confirmed. Now packing\n",
2225 dev->name);
2226 p_env->packing = DO_PACKED;
2228 p_ch = &privptr->channel[WRITE];
2229 wake_up(&p_ch->wait);
2230 } else {
2231 printk(KERN_INFO "%s: Conn confirm: "
2232 "unexpected linkid=%d \n",
2233 dev->name, p_ctlbk->linkid);
2234 claw_snd_disc(dev, p_ctlbk);
2236 break;
2237 case DISCONNECT:
2238 printk(KERN_INFO "%s: Disconnect: "
2239 "Vers=%d,link_id=%d,Corr=%d\n",
2240 dev->name, p_ctlbk->version,
2241 p_ctlbk->linkid, p_ctlbk->correlator);
2242 if ((p_ctlbk->linkid == 2) &&
2243 (p_env->packing == PACK_SEND)) {
2244 privptr->active_link_ID = 1;
2245 p_env->packing = DO_PACKED;
2246 } else
2247 privptr->active_link_ID = 0;
2248 break;
2249 case CLAW_ERROR:
2250 printk(KERN_INFO "%s: CLAW ERROR detected\n",
2251 dev->name);
2252 break;
2253 default:
2254 printk(KERN_INFO "%s: Unexpected command code=%d \n",
2255 dev->name, p_ctlbk->command);
2256 break;
2259 return 0;
2260 } /* end of claw_process_control */
2263 /*-------------------------------------------------------------------*
2264 * claw_send_control *
2266 *--------------------------------------------------------------------*/
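/*
 * Build a CLAW control record of the requested type in privptr->ctl_bk,
 * fill in the system-validate or connection payload (names and frame
 * sizes), copy it into a freshly allocated skb and hand it to
 * claw_hw_tx().  The last argument passed to claw_hw_tx() is 1 once
 * packing has been negotiated (packing >= PACK_SEND) and 0 otherwise.
 * Returns 0 on success or -ENOMEM if no skb could be allocated.
 */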
2268 static int
2269 claw_send_control(struct net_device *dev, __u8 type, __u8 link,
2270 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
2272 struct claw_privbk *privptr;
2273 struct clawctl *p_ctl;
2274 struct sysval *p_sysval;
2275 struct conncmd *p_connect;
2276 struct sk_buff *skb;
2278 CLAW_DBF_TEXT(2, setup, "sndcntl");
2279 privptr = dev->ml_priv;
2280 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2282 p_ctl->command=type;
2283 p_ctl->version=CLAW_VERSION_ID;
2284 p_ctl->linkid=link;
2285 p_ctl->correlator=correlator;
2286 p_ctl->rc=rc;
2288 p_sysval=(struct sysval *)&p_ctl->data;
2289 p_connect=(struct conncmd *)&p_ctl->data;
2291 switch (p_ctl->command) {
2292 case SYSTEM_VALIDATE_REQUEST:
2293 case SYSTEM_VALIDATE_RESPONSE:
2294 memcpy(&p_sysval->host_name, local_name, 8);
2295 memcpy(&p_sysval->WS_name, remote_name, 8);
2296 if (privptr->p_env->packing > 0) {
2297 p_sysval->read_frame_size=DEF_PACK_BUFSIZE;
2298 p_sysval->write_frame_size=DEF_PACK_BUFSIZE;
2299 } else {
2300 /* how big is the biggest group of packets */
2301 p_sysval->read_frame_size=privptr->p_env->read_size;
2302 p_sysval->write_frame_size=privptr->p_env->write_size;
2304 memset(&p_sysval->reserved, 0x00, 4);
2305 break;
2306 case CONNECTION_REQUEST:
2307 case CONNECTION_RESPONSE:
2308 case CONNECTION_CONFIRM:
2309 case DISCONNECT:
2310 memcpy(&p_sysval->host_name, local_name, 8);
2311 memcpy(&p_sysval->WS_name, remote_name, 8);
2312 if (privptr->p_env->packing > 0) {
2313 /* How big is the biggest packet */
2314 p_connect->reserved1[0]=CLAW_FRAME_SIZE;
2315 p_connect->reserved1[1]=CLAW_FRAME_SIZE;
2316 } else {
2317 memset(&p_connect->reserved1, 0x00, 4);
2318 memset(&p_connect->reserved2, 0x00, 4);
2320 break;
2321 default:
2322 break;
2325 /* write Control Record to the device */
2328 skb = dev_alloc_skb(sizeof(struct clawctl));
2329 if (!skb) {
2330 return -ENOMEM;
2332 memcpy(skb_put(skb, sizeof(struct clawctl)),
2333 p_ctl, sizeof(struct clawctl));
2334 if (privptr->p_env->packing >= PACK_SEND)
2335 claw_hw_tx(skb, dev, 1);
2336 else
2337 claw_hw_tx(skb, dev, 0);
2338 return 0;
2339 } /* end of claw_send_control */
2341 /*-------------------------------------------------------------------*
2342 * claw_snd_conn_req *
2344 *--------------------------------------------------------------------*/
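/*
 * Send a CONNECTION_REQUEST control record for the given link.  Nothing
 * is sent until system validation has completed; the application name
 * carried in the request depends on the negotiated mode (packed,
 * packed-send, or the plain configured api_type).
 */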
2345 static int
2346 claw_snd_conn_req(struct net_device *dev, __u8 link)
2348 int rc;
2349 struct claw_privbk *privptr = dev->ml_priv;
2350 struct clawctl *p_ctl;
2352 CLAW_DBF_TEXT(2, setup, "snd_conn");
2353 rc = 1;
2354 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2355 p_ctl->linkid = link;
2356 if ( privptr->system_validate_comp==0x00 ) {
2357 return rc;
2359 if (privptr->p_env->packing == PACKING_ASK )
2360 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2361 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
2362 if (privptr->p_env->packing == PACK_SEND) {
2363 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2364 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
2366 if (privptr->p_env->packing == 0)
2367 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2368 HOST_APPL_NAME, privptr->p_env->api_type);
2369 return rc;
2371 } /* end of claw_snd_conn_req */
2374 /*-------------------------------------------------------------------*
2375 * claw_snd_disc *
2377 *--------------------------------------------------------------------*/
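/*
 * Answer a control record with a DISCONNECT for the same link id and
 * correlator, echoing back the host and workstation names it carried.
 */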
2379 static int
2380 claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
2382 int rc;
2383 struct conncmd * p_connect;
2385 CLAW_DBF_TEXT(2, setup, "snd_dsc");
2386 p_connect=(struct conncmd *)&p_ctl->data;
2388 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
2389 p_ctl->correlator, 0,
2390 p_connect->host_name, p_connect->WS_name);
2391 return rc;
2392 } /* end of claw_snd_disc */
2395 /*-------------------------------------------------------------------*
2396 * claw_snd_sys_validate_rsp *
2398 *--------------------------------------------------------------------*/
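/*
 * Send a SYSTEM_VALIDATE_RESPONSE carrying the given return code and
 * this side's configured host and adapter names.
 */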
2400 static int
2401 claw_snd_sys_validate_rsp(struct net_device *dev,
2402 struct clawctl *p_ctl, __u32 return_code)
2404 struct claw_env * p_env;
2405 struct claw_privbk *privptr;
2406 int rc;
2408 CLAW_DBF_TEXT(2, setup, "chkresp");
2409 privptr = dev->ml_priv;
2410 p_env=privptr->p_env;
2411 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
2412 p_ctl->linkid,
2413 p_ctl->correlator,
2414 return_code,
2415 p_env->host_name,
2416 p_env->adapter_name );
2417 return rc;
2418 } /* end of claw_snd_sys_validate_rsp */
2420 /*-------------------------------------------------------------------*
2421 * claw_strt_conn_req *
2423 *--------------------------------------------------------------------*/
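/* Kick off the CLAW connection handshake by requesting logical link 1. */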
2425 static int
2426 claw_strt_conn_req(struct net_device *dev )
2428 int rc;
2430 CLAW_DBF_TEXT(2, setup, "conn_req");
2431 rc=claw_snd_conn_req(dev, 1);
2432 return rc;
2433 } /* end of claw_strt_conn_req */
2437 /*-------------------------------------------------------------------*
2438 * claw_stats *
2439 *-------------------------------------------------------------------*/
2441 static struct
2442 net_device_stats *claw_stats(struct net_device *dev)
2444 struct claw_privbk *privptr;
2446 CLAW_DBF_TEXT(4, trace, "stats");
2447 privptr = dev->ml_priv;
2448 return &privptr->stats;
2449 } /* end of claw_stats */
2452 /*-------------------------------------------------------------------*
2453 * unpack_read *
2455 *--------------------------------------------------------------------*/
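/*
 * Walk the chain of completed read CCWs and turn them back into
 * sk_buffs for the stack.  Link 0 frames are control records and are
 * handed to claw_process_control().  Data frames are gathered into the
 * MTC envelope until a frame without the More-To-Come flag arrives, at
 * which point the whole envelope is pushed up with netif_rx().  When
 * the link is packed, one read buffer may hold several sub-frames, each
 * preceded by a struct clawph header whose len field gives the payload
 * size; pack_off steps through them.  Processed CCW blocks are
 * re-queued on the free read chain and the read channel is restarted.
 */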
2456 static void
2457 unpack_read(struct net_device *dev )
2459 struct sk_buff *skb;
2460 struct claw_privbk *privptr;
2461 struct claw_env *p_env;
2462 struct ccwbk *p_this_ccw;
2463 struct ccwbk *p_first_ccw;
2464 struct ccwbk *p_last_ccw;
2465 struct clawph *p_packh;
2466 void *p_packd;
2467 struct clawctl *p_ctlrec=NULL;
2468 struct device *p_dev;
2470 __u32 len_of_data;
2471 __u32 pack_off;
2472 __u8 link_num;
2473 __u8 mtc_this_frm=0;
2474 __u32 bytes_to_mov;
2475 int i=0;
2476 int p=0;
2478 CLAW_DBF_TEXT(4, trace, "unpkread");
2479 p_first_ccw=NULL;
2480 p_last_ccw=NULL;
2481 p_packh=NULL;
2482 p_packd=NULL;
2483 privptr = dev->ml_priv;
2485 p_dev = &privptr->channel[READ].cdev->dev;
2486 p_env = privptr->p_env;
2487 p_this_ccw=privptr->p_read_active_first;
2488 i=0;
2489 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
2490 pack_off = 0;
2491 p = 0;
2492 p_this_ccw->header.flag=CLAW_PENDING;
2493 privptr->p_read_active_first=p_this_ccw->next;
2494 p_this_ccw->next=NULL;
2495 p_packh = (struct clawph *)p_this_ccw->p_buffer;
2496 if ((p_env->packing == PACK_SEND) &&
2497 (p_packh->len == 32) &&
2498 (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
2499 p_packh++; /* peek past pack header */
2500 p_ctlrec = (struct clawctl *)p_packh;
2501 p_packh--; /* un peek */
2502 if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
2503 (p_ctlrec->command == CONNECTION_CONFIRM))
2504 p_env->packing = DO_PACKED;
2506 if (p_env->packing == DO_PACKED)
2507 link_num=p_packh->link_num;
2508 else
2509 link_num=p_this_ccw->header.opcode / 8;
2510 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
2511 mtc_this_frm=1;
2512 if (p_this_ccw->header.length!=
2513 privptr->p_env->read_size ) {
2514 printk(KERN_INFO "%s: Invalid frame detected "
2515 "length is %02x\n" ,
2516 dev->name, p_this_ccw->header.length);
2520 if (privptr->mtc_skipping) {
2522 * We're in the mode of skipping past a
2523 * multi-frame message
2524 * that we can't process for some reason or other.
2525 * The first frame without the More-To-Come flag is
2526 * the last frame of the skipped message.
2528 /* in case of More-To-Come not set in this frame */
2529 if (mtc_this_frm==0) {
2530 privptr->mtc_skipping=0; /* Ok, the end */
2531 privptr->mtc_logical_link=-1;
2533 goto NextFrame;
2536 if (link_num==0) {
2537 claw_process_control(dev, p_this_ccw);
2538 CLAW_DBF_TEXT(4, trace, "UnpkCntl");
2539 goto NextFrame;
2541 unpack_next:
2542 if (p_env->packing == DO_PACKED) {
2543 if (pack_off > p_env->read_size)
2544 goto NextFrame;
2545 p_packd = p_this_ccw->p_buffer+pack_off;
2546 p_packh = (struct clawph *) p_packd;
2547 if ((p_packh->len == 0) || /* all done with this frame? */
2548 (p_packh->flag != 0))
2549 goto NextFrame;
2550 bytes_to_mov = p_packh->len;
2551 pack_off += bytes_to_mov+sizeof(struct clawph);
2552 p++;
2553 } else {
2554 bytes_to_mov=p_this_ccw->header.length;
2556 if (privptr->mtc_logical_link<0) {
2559 * if More-To-Come is set in this frame then we don't know
2560 * the length of the entire message, and hence have to allocate
2561 * a large buffer */
2563 /* We are starting a new envelope */
2564 privptr->mtc_offset=0;
2565 privptr->mtc_logical_link=link_num;
2568 if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
2569 /* error */
2570 privptr->stats.rx_frame_errors++;
2571 goto NextFrame;
2573 if (p_env->packing == DO_PACKED) {
2574 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2575 p_packd+sizeof(struct clawph), bytes_to_mov);
2577 } else {
2578 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2579 p_this_ccw->p_buffer, bytes_to_mov);
2581 if (mtc_this_frm==0) {
2582 len_of_data=privptr->mtc_offset+bytes_to_mov;
2583 skb=dev_alloc_skb(len_of_data);
2584 if (skb) {
2585 memcpy(skb_put(skb,len_of_data),
2586 privptr->p_mtc_envelope,
2587 len_of_data);
2588 skb->dev=dev;
2589 skb_reset_mac_header(skb);
2590 skb->protocol=htons(ETH_P_IP);
2591 skb->ip_summed=CHECKSUM_UNNECESSARY;
2592 privptr->stats.rx_packets++;
2593 privptr->stats.rx_bytes+=len_of_data;
2594 netif_rx(skb);
2596 else {
2597 privptr->stats.rx_dropped++;
2598 printk(KERN_WARNING "%s: %s() low on memory\n",
2599 dev->name,__func__);
2601 privptr->mtc_offset=0;
2602 privptr->mtc_logical_link=-1;
2604 else {
2605 privptr->mtc_offset+=bytes_to_mov;
2607 if (p_env->packing == DO_PACKED)
2608 goto unpack_next;
2609 NextFrame:
2611 * Remove ThisCCWblock from active read queue, and add it
2612 * to the queue of free blocks to be reused.
2614 i++;
2615 p_this_ccw->header.length=0xffff;
2616 p_this_ccw->header.opcode=0xff;
2618 * add this one to the free queue for later reuse
2620 if (p_first_ccw==NULL) {
2621 p_first_ccw = p_this_ccw;
2623 else {
2624 p_last_ccw->next = p_this_ccw;
2626 p_last_ccw = p_this_ccw;
2628 * chain to next block on active read queue
2630 p_this_ccw = privptr->p_read_active_first;
2631 CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
2632 } /* end of while */
2634 /* check validity */
2636 CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
2637 add_claw_reads(dev, p_first_ccw, p_last_ccw);
2638 claw_strt_read(dev, LOCK_YES);
2639 return;
2640 } /* end of unpack_read */
2642 /*-------------------------------------------------------------------*
2643 * claw_strt_read *
2645 *--------------------------------------------------------------------*/
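/*
 * (Re)start the read channel.  The CLAW signal block is set to
 * CLAW_IDLE, or to CLAW_BUSY while either the read or the write queue
 * still has uncompleted work.  ccw_device_start() is issued on the
 * first active read CCW only if no read I/O is already in flight
 * (IO_active test-and-set).  The lock parameter selects whether the
 * ccw device lock has to be taken here.
 */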
2646 static void
2647 claw_strt_read (struct net_device *dev, int lock )
2649 int rc = 0;
2650 __u32 parm;
2651 unsigned long saveflags = 0;
2652 struct claw_privbk *privptr = dev->ml_priv;
2653 struct ccwbk*p_ccwbk;
2654 struct chbk *p_ch;
2655 struct clawh *p_clawh;
2656 p_ch=&privptr->channel[READ];
2658 CLAW_DBF_TEXT(4, trace, "StRdNter");
2659 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
2660 p_clawh->flag=CLAW_IDLE; /* 0x00 */
2662 if ((privptr->p_write_active_first!=NULL &&
2663 privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
2664 (privptr->p_read_active_first!=NULL &&
2665 privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
2666 p_clawh->flag=CLAW_BUSY; /* 0xff */
2668 if (lock==LOCK_YES) {
2669 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
2671 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2672 CLAW_DBF_TEXT(4, trace, "HotRead");
2673 p_ccwbk=privptr->p_read_active_first;
2674 parm = (unsigned long) p_ch;
2675 rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
2676 0xff, 0);
2677 if (rc != 0) {
2678 ccw_check_return_code(p_ch->cdev, rc);
2681 else {
2682 CLAW_DBF_TEXT(2, trace, "ReadAct");
2685 if (lock==LOCK_YES) {
2686 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
2688 CLAW_DBF_TEXT(4, trace, "StRdExit");
2689 return;
2690 } /* end of claw_strt_read */
2692 /*-------------------------------------------------------------------*
2693 * claw_strt_out_IO *
2695 *--------------------------------------------------------------------*/
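/*
 * Start write I/O on the first CCW of the active write queue, unless
 * the channel is stopped, the queue is empty, or a write is already in
 * flight.  A failed ccw_device_start() is reported through
 * ccw_check_return_code(); trans_start is refreshed for the watchdog.
 */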
2697 static void
2698 claw_strt_out_IO( struct net_device *dev )
2700 int rc = 0;
2701 unsigned long parm;
2702 struct claw_privbk *privptr;
2703 struct chbk *p_ch;
2704 struct ccwbk *p_first_ccw;
2706 if (!dev) {
2707 return;
2709 privptr = (struct claw_privbk *)dev->ml_priv;
2710 p_ch=&privptr->channel[WRITE];
2712 CLAW_DBF_TEXT(4, trace, "strt_io");
2713 p_first_ccw=privptr->p_write_active_first;
2715 if (p_ch->claw_state == CLAW_STOP)
2716 return;
2717 if (p_first_ccw == NULL) {
2718 return;
2720 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2721 parm = (unsigned long) p_ch;
2722 CLAW_DBF_TEXT(2, trace, "StWrtIO");
2723 rc = ccw_device_start (p_ch->cdev,&p_first_ccw->write, parm,
2724 0xff, 0);
2725 if (rc != 0) {
2726 ccw_check_return_code(p_ch->cdev, rc);
2729 dev->trans_start = jiffies;
2730 return;
2731 } /* end of claw_strt_out_IO */
2733 /*-------------------------------------------------------------------*
2734 * Free write buffers *
2736 *--------------------------------------------------------------------*/
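/*
 * Return completed write CCWs at the head of the active write queue to
 * the free write chain, updating the tx byte/packet counters.  A
 * completed CCW is only recycled once its successor has also completed
 * or it is the tail of the queue; the scan stops at the first CCW that
 * is still pending.  Once at least one buffer is free again the
 * TB_NOBUFFER busy bit is cleared, and when the whole chain has drained
 * the active-last pointer is reset.
 */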
2738 static void
2739 claw_free_wrt_buf( struct net_device *dev )
2742 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2743 struct ccwbk*p_first_ccw;
2744 struct ccwbk*p_last_ccw;
2745 struct ccwbk*p_this_ccw;
2746 struct ccwbk*p_next_ccw;
2748 CLAW_DBF_TEXT(4, trace, "freewrtb");
2749 /* scan the write queue to free any completed write packets */
2750 p_first_ccw=NULL;
2751 p_last_ccw=NULL;
2752 p_this_ccw=privptr->p_write_active_first;
2753 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2755 p_next_ccw = p_this_ccw->next;
2756 if (((p_next_ccw!=NULL) &&
2757 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
2758 ((p_this_ccw == privptr->p_write_active_last) &&
2759 (p_this_ccw->header.flag!=CLAW_PENDING))) {
2760 /* The next CCW is OK or this is */
2761 /* the last CCW...free it @A1A */
2762 privptr->p_write_active_first=p_this_ccw->next;
2763 p_this_ccw->header.flag=CLAW_PENDING;
2764 p_this_ccw->next=privptr->p_write_free_chain;
2765 privptr->p_write_free_chain=p_this_ccw;
2766 ++privptr->write_free_count;
2767 privptr->stats.tx_bytes+= p_this_ccw->write.count;
2768 p_this_ccw=privptr->p_write_active_first;
2769 privptr->stats.tx_packets++;
2771 else {
2772 break;
2775 if (privptr->write_free_count!=0) {
2776 claw_clearbit_busy(TB_NOBUFFER,dev);
2778 /* whole chain removed? */
2779 if (privptr->p_write_active_first==NULL) {
2780 privptr->p_write_active_last=NULL;
2782 CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
2783 return;
2786 /*-------------------------------------------------------------------*
2787 * claw free netdevice *
2789 *--------------------------------------------------------------------*/
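/*
 * Release a claw net_device: close it if it is still running, detach it
 * from the read channel and from ml_priv, and (when built as a module
 * and free_dev is set) hand the structure back to free_netdev().
 */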
2790 static void
2791 claw_free_netdevice(struct net_device * dev, int free_dev)
2793 struct claw_privbk *privptr;
2795 CLAW_DBF_TEXT(2, setup, "free_dev");
2796 if (!dev)
2797 return;
2798 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2799 privptr = dev->ml_priv;
2800 if (dev->flags & IFF_RUNNING)
2801 claw_release(dev);
2802 if (privptr) {
2803 privptr->channel[READ].ndev = NULL; /* say it's free */
2805 dev->ml_priv = NULL;
2806 #ifdef MODULE
2807 if (free_dev) {
2808 free_netdev(dev);
2810 #endif
2811 CLAW_DBF_TEXT(2, setup, "free_ok");
2815 * Claw init netdevice
2816 * Initialize everything in the net device except the name and the
2817 * channel structs.
2819 static void
2820 claw_init_netdevice(struct net_device * dev)
2822 CLAW_DBF_TEXT(2, setup, "init_dev");
2823 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2824 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
2825 dev->hard_start_xmit = claw_tx;
2826 dev->open = claw_open;
2827 dev->stop = claw_release;
2828 dev->get_stats = claw_stats;
2829 dev->change_mtu = claw_change_mtu;
2830 dev->hard_header_len = 0;
2831 dev->addr_len = 0;
2832 dev->type = ARPHRD_SLIP;
2833 dev->tx_queue_len = 1300;
2834 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2835 CLAW_DBF_TEXT(2, setup, "initok");
2836 return;
2840 * Init a new channel in the privptr->channel[i].
2842 * @param cdev The ccw_device to be added.
2844 * @return 0 on success, !0 on error.
2846 static int
2847 add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
2849 struct chbk *p_ch;
2850 struct ccw_dev_id dev_id;
2852 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
2853 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
2854 p_ch = &privptr->channel[i];
2855 p_ch->cdev = cdev;
2856 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
2857 ccw_device_get_id(cdev, &dev_id);
2858 p_ch->devno = dev_id.devno;
2859 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
2860 return -ENOMEM;
2862 return 0;
2868 * Setup an interface.
2870 * @param cgdev Device to be setup.
2872 * @returns 0 on success, !0 on failure.
2874 static int
2875 claw_new_device(struct ccwgroup_device *cgdev)
2877 struct claw_privbk *privptr;
2878 struct claw_env *p_env;
2879 struct net_device *dev;
2880 int ret;
2881 struct ccw_dev_id dev_id;
2883 printk(KERN_INFO "claw: add for %s\n",
2884 dev_name(&cgdev->cdev[READ]->dev));
2885 CLAW_DBF_TEXT(2, setup, "new_dev");
2886 privptr = cgdev->dev.driver_data;
2887 cgdev->cdev[READ]->dev.driver_data = privptr;
2888 cgdev->cdev[WRITE]->dev.driver_data = privptr;
2889 if (!privptr)
2890 return -ENODEV;
2891 p_env = privptr->p_env;
2892 ccw_device_get_id(cgdev->cdev[READ], &dev_id);
2893 p_env->devno[READ] = dev_id.devno;
2894 ccw_device_get_id(cgdev->cdev[WRITE], &dev_id);
2895 p_env->devno[WRITE] = dev_id.devno;
2896 ret = add_channel(cgdev->cdev[0],0,privptr);
2897 if (ret == 0)
2898 ret = add_channel(cgdev->cdev[1],1,privptr);
2899 if (ret != 0) {
2900 printk(KERN_WARNING
2901 "add channel failed with ret = %d\n", ret);
2902 goto out;
2904 ret = ccw_device_set_online(cgdev->cdev[READ]);
2905 if (ret != 0) {
2906 printk(KERN_WARNING
2907 "claw: ccw_device_set_online %s READ failed "
2908 "with ret = %d\n", dev_name(&cgdev->cdev[READ]->dev),
2909 ret);
2910 goto out;
2912 ret = ccw_device_set_online(cgdev->cdev[WRITE]);
2913 if (ret != 0) {
2914 printk(KERN_WARNING
2915 "claw: ccw_device_set_online %s WRITE failed "
2916 "with ret = %d\n", dev_name(&cgdev->cdev[WRITE]->dev),
2917 ret);
2918 goto out;
2920 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
2921 if (!dev) {
2922 printk(KERN_WARNING "%s: alloc_netdev failed\n", __func__);
2923 goto out;
2925 dev->ml_priv = privptr;
2926 cgdev->dev.driver_data = privptr;
2927 cgdev->cdev[READ]->dev.driver_data = privptr;
2928 cgdev->cdev[WRITE]->dev.driver_data = privptr;
2929 /* sysfs magic */
2930 SET_NETDEV_DEV(dev, &cgdev->dev);
2931 if (register_netdev(dev) != 0) {
2932 claw_free_netdevice(dev, 1);
2933 CLAW_DBF_TEXT(2, trace, "regfail");
2934 goto out;
2936 dev->flags &=~IFF_RUNNING;
2937 if (privptr->buffs_alloc == 0) {
2938 ret=init_ccw_bk(dev);
2939 if (ret !=0) {
2940 unregister_netdev(dev);
2941 claw_free_netdevice(dev,1);
2942 CLAW_DBF_TEXT(2, trace, "ccwmem");
2943 goto out;
2946 privptr->channel[READ].ndev = dev;
2947 privptr->channel[WRITE].ndev = dev;
2948 privptr->p_env->ndev = dev;
2950 printk(KERN_INFO "%s:readsize=%d writesize=%d "
2951 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
2952 dev->name, p_env->read_size,
2953 p_env->write_size, p_env->read_buffers,
2954 p_env->write_buffers, p_env->devno[READ],
2955 p_env->devno[WRITE]);
2956 printk(KERN_INFO "%s:host_name:%.8s, adapter_name "
2957 ":%.8s api_type: %.8s\n",
2958 dev->name, p_env->host_name,
2959 p_env->adapter_name , p_env->api_type);
2960 return 0;
2961 out:
2962 ccw_device_set_offline(cgdev->cdev[1]);
2963 ccw_device_set_offline(cgdev->cdev[0]);
2964 return -ENODEV;
2967 static void
2968 claw_purge_skb_queue(struct sk_buff_head *q)
2970 struct sk_buff *skb;
2972 CLAW_DBF_TEXT(4, trace, "purgque");
2973 while ((skb = skb_dequeue(q))) {
2974 atomic_dec(&skb->users);
2975 dev_kfree_skb_any(skb);
2980 * Shutdown an interface.
2982 * @param cgdev Device to be shut down.
2984 * @returns 0 on success, !0 on failure.
2986 static int
2987 claw_shutdown_device(struct ccwgroup_device *cgdev)
2989 struct claw_privbk *priv;
2990 struct net_device *ndev;
2991 int ret;
2993 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
2994 priv = cgdev->dev.driver_data;
2995 if (!priv)
2996 return -ENODEV;
2997 ndev = priv->channel[READ].ndev;
2998 if (ndev) {
2999 /* Close the device */
3000 printk(KERN_INFO
3001 "%s: shuting down \n",ndev->name);
3002 if (ndev->flags & IFF_RUNNING)
3003 ret = claw_release(ndev);
3004 ndev->flags &=~IFF_RUNNING;
3005 unregister_netdev(ndev);
3006 ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
3007 claw_free_netdevice(ndev, 1);
3008 priv->channel[READ].ndev = NULL;
3009 priv->channel[WRITE].ndev = NULL;
3010 priv->p_env->ndev = NULL;
3012 ccw_device_set_offline(cgdev->cdev[1]);
3013 ccw_device_set_offline(cgdev->cdev[0]);
3014 return 0;
3017 static void
3018 claw_remove_device(struct ccwgroup_device *cgdev)
3020 struct claw_privbk *priv;
3022 BUG_ON(!cgdev);
3023 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3024 priv = cgdev->dev.driver_data;
3025 BUG_ON(!priv);
3026 printk(KERN_INFO "claw: %s() called, %s will be removed.\n",
3027 __func__, dev_name(&cgdev->cdev[0]->dev));
3028 if (cgdev->state == CCWGROUP_ONLINE)
3029 claw_shutdown_device(cgdev);
3030 claw_remove_files(&cgdev->dev);
3031 kfree(priv->p_mtc_envelope);
3032 priv->p_mtc_envelope=NULL;
3033 kfree(priv->p_env);
3034 priv->p_env=NULL;
3035 kfree(priv->channel[0].irb);
3036 priv->channel[0].irb=NULL;
3037 kfree(priv->channel[1].irb);
3038 priv->channel[1].irb=NULL;
3039 kfree(priv);
3040 cgdev->dev.driver_data=NULL;
3041 cgdev->cdev[READ]->dev.driver_data = NULL;
3042 cgdev->cdev[WRITE]->dev.driver_data = NULL;
3043 put_device(&cgdev->dev);
3045 return;
3050 * sysfs attributes
3052 static ssize_t
3053 claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3055 struct claw_privbk *priv;
3056 struct claw_env * p_env;
3058 priv = dev->driver_data;
3059 if (!priv)
3060 return -ENODEV;
3061 p_env = priv->p_env;
3062 return sprintf(buf, "%s\n",p_env->host_name);
3065 static ssize_t
3066 claw_hname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
3068 struct claw_privbk *priv;
3069 struct claw_env * p_env;
3071 priv = dev->driver_data;
3072 if (!priv)
3073 return -ENODEV;
3074 p_env = priv->p_env;
3075 if (count > MAX_NAME_LEN+1)
3076 return -EINVAL;
3077 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
3078 strncpy(p_env->host_name,buf, count);
3079 p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
3080 p_env->host_name[MAX_NAME_LEN] = 0x00;
3081 CLAW_DBF_TEXT(2, setup, "HstnSet");
3082 CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
3084 return count;
3087 static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
3089 static ssize_t
3090 claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3092 struct claw_privbk *priv;
3093 struct claw_env * p_env;
3095 priv = dev->driver_data;
3096 if (!priv)
3097 return -ENODEV;
3098 p_env = priv->p_env;
3099 return sprintf(buf, "%s\n", p_env->adapter_name);
3102 static ssize_t
3103 claw_adname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
3105 struct claw_privbk *priv;
3106 struct claw_env * p_env;
3108 priv = dev->driver_data;
3109 if (!priv)
3110 return -ENODEV;
3111 p_env = priv->p_env;
3112 if (count > MAX_NAME_LEN+1)
3113 return -EINVAL;
3114 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
3115 strncpy(p_env->adapter_name,buf, count);
3116 p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
3117 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
3118 CLAW_DBF_TEXT(2, setup, "AdnSet");
3119 CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
3121 return count;
3124 static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
3126 static ssize_t
3127 claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3129 struct claw_privbk *priv;
3130 struct claw_env * p_env;
3132 priv = dev->driver_data;
3133 if (!priv)
3134 return -ENODEV;
3135 p_env = priv->p_env;
3136 return sprintf(buf, "%s\n",
3137 p_env->api_type);
3140 static ssize_t
3141 claw_apname_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
3143 struct claw_privbk *priv;
3144 struct claw_env * p_env;
3146 priv = dev->driver_data;
3147 if (!priv)
3148 return -ENODEV;
3149 p_env = priv->p_env;
3150 if (count > MAX_NAME_LEN+1)
3151 return -EINVAL;
3152 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
3153 strncpy(p_env->api_type,buf, count);
3154 p_env->api_type[count-1] = 0x20; /* we get a loose 0x0a */
3155 p_env->api_type[MAX_NAME_LEN] = 0x00;
3156 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
3157 p_env->read_size=DEF_PACK_BUFSIZE;
3158 p_env->write_size=DEF_PACK_BUFSIZE;
3159 p_env->packing=PACKING_ASK;
3160 CLAW_DBF_TEXT(2, setup, "PACKING");
3162 else {
3163 p_env->packing=0;
3164 p_env->read_size=CLAW_FRAME_SIZE;
3165 p_env->write_size=CLAW_FRAME_SIZE;
3166 CLAW_DBF_TEXT(2, setup, "ApiSet");
3168 CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
3169 return count;
3172 static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
3174 static ssize_t
3175 claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3177 struct claw_privbk *priv;
3178 struct claw_env * p_env;
3180 priv = dev->driver_data;
3181 if (!priv)
3182 return -ENODEV;
3183 p_env = priv->p_env;
3184 return sprintf(buf, "%d\n", p_env->write_buffers);
3187 static ssize_t
3188 claw_wbuff_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
3190 struct claw_privbk *priv;
3191 struct claw_env * p_env;
3192 int nnn,max;
3194 priv = dev->driver_data;
3195 if (!priv)
3196 return -ENODEV;
3197 p_env = priv->p_env;
3198 sscanf(buf, "%i", &nnn);
3199 if (p_env->packing) {
3200 max = 64;
3202 else {
3203 max = 512;
3205 if ((nnn > max ) || (nnn < 2))
3206 return -EINVAL;
3207 p_env->write_buffers = nnn;
3208 CLAW_DBF_TEXT(2, setup, "Wbufset");
3209 CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
3210 return count;
3213 static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
3215 static ssize_t
3216 claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3218 struct claw_privbk *priv;
3219 struct claw_env * p_env;
3221 priv = dev->driver_data;
3222 if (!priv)
3223 return -ENODEV;
3224 p_env = priv->p_env;
3225 return sprintf(buf, "%d\n", p_env->read_buffers);
3228 static ssize_t
3229 claw_rbuff_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
3231 struct claw_privbk *priv;
3232 struct claw_env *p_env;
3233 int nnn,max;
3235 priv = dev->driver_data;
3236 if (!priv)
3237 return -ENODEV;
3238 p_env = priv->p_env;
3239 sscanf(buf, "%i", &nnn);
3240 if (p_env->packing) {
3241 max = 64;
3243 else {
3244 max = 512;
3246 if ((nnn > max ) || (nnn < 2))
3247 return -EINVAL;
3248 p_env->read_buffers = nnn;
3249 CLAW_DBF_TEXT(2, setup, "Rbufset");
3250 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3251 return count;
3254 static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
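/*
 * The attribute files above are grouped below and created on the
 * ccwgroup device via claw_add_files().  Purely as an illustration
 * (the device directory is a placeholder; the exact path depends on
 * the bus id of the group device), they would typically be written
 * before bringing the device online, e.g.:
 *
 *   echo MYHOST  > /sys/bus/ccwgroup/devices/.../host_name
 *   echo MYADAPT > /sys/bus/ccwgroup/devices/.../adapter_name
 *   echo API     > /sys/bus/ccwgroup/devices/.../api_type
 *   echo 16      > /sys/bus/ccwgroup/devices/.../read_buffer
 *   echo 16      > /sys/bus/ccwgroup/devices/.../write_buffer
 *
 * The buffer counts are bounded by the store routines (2..64 when
 * packing, 2..512 otherwise); the name strings are blank-padded to
 * MAX_NAME_LEN by the writers.
 */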
3256 static struct attribute *claw_attr[] = {
3257 &dev_attr_read_buffer.attr,
3258 &dev_attr_write_buffer.attr,
3259 &dev_attr_adapter_name.attr,
3260 &dev_attr_api_type.attr,
3261 &dev_attr_host_name.attr,
3262 NULL,
3265 static struct attribute_group claw_attr_group = {
3266 .attrs = claw_attr,
3269 static int
3270 claw_add_files(struct device *dev)
3272 CLAW_DBF_TEXT(2, setup, "add_file");
3273 return sysfs_create_group(&dev->kobj, &claw_attr_group);
3276 static void
3277 claw_remove_files(struct device *dev)
3279 CLAW_DBF_TEXT(2, setup, "rem_file");
3280 sysfs_remove_group(&dev->kobj, &claw_attr_group);
3283 /*--------------------------------------------------------------------*
3284 * claw_init and cleanup *
3285 *---------------------------------------------------------------------*/
3287 static void __exit
3288 claw_cleanup(void)
3290 unregister_cu3088_discipline(&claw_group_driver);
3291 claw_unregister_debug_facility();
3292 printk(KERN_INFO "claw: Driver unloaded\n");
3297 * Initialize module.
3298 * This is called just after the module is loaded.
3300 * @return 0 on success, !0 on error.
3302 static int __init
3303 claw_init(void)
3305 int ret = 0;
3306 printk(KERN_INFO "claw: starting driver\n");
3308 ret = claw_register_debug_facility();
3309 if (ret) {
3310 printk(KERN_WARNING "claw: %s() debug_register failed %d\n",
3311 __func__,ret);
3312 return ret;
3314 CLAW_DBF_TEXT(2, setup, "init_mod");
3315 ret = register_cu3088_discipline(&claw_group_driver);
3316 if (ret) {
3317 CLAW_DBF_TEXT(2, setup, "init_bad");
3318 claw_unregister_debug_facility();
3319 printk(KERN_WARNING "claw: %s() cu3088 register failed %d\n",
3320 __func__,ret);
3322 return ret;
3325 module_init(claw_init);
3326 module_exit(claw_cleanup);
3328 MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
3329 MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
3330 "Copyright 2000,2008 IBM Corporation\n");
3331 MODULE_LICENSE("GPL");