/*
 *  drivers/s390/net/claw.c
 *    ESCON CLAW network driver
 *
 *  Linux for zSeries version
 *    Copyright IBM Corp. 2002, 2009
 *  Author(s) Original code written by:
 *		Kazuo Iimura <iimura@jp.ibm.com>
 *	Rewritten by
 *		Andy Richter <richtera@us.ibm.com>
 *		Marc Price <mwprice@us.ibm.com>
 *
 *  sysfs parms:
 *    group         x.x.rrrr,x.x.wwww
 *    read_buffer   nnnnnnn
 *    write_buffer  nnnnnn
 *    host_name     aaaaaaaa
 *    adapter_name  aaaaaaaa
 *    api_type      aaaaaaaa
 *
 *  e.g.
 *    group         0.0.0200,0.0.0201
 *    read_buffer   25
 *    write_buffer  20
 *    host_name     LINUX390
 *    adapter_name  RS6K
 *    api_type      TCPIP
 *
 *  where
 *
 *    The device id is decided by the order in which entries are added to
 *    the group: the first pair becomes claw0, the second claw1, and so on
 *    up to CLAW_MAX_DEV.
 *
 *    rrrr	- the first of 2 consecutive device addresses used for the
 *		  CLAW protocol.
 *		  The specified address is always used as the input (Read)
 *		  channel and the next address is used as the output channel.
 *
 *    wwww	- the second of 2 consecutive device addresses used for
 *		  the CLAW protocol.
 *		  The specified address is always used as the output
 *		  channel and the previous address is used as the input
 *		  channel.
 *
 *    read_buffer   - specifies the number of input buffers to allocate.
 *    write_buffer  - specifies the number of output buffers to allocate.
 *    host_name     - host name
 *    adapter_name  - adapter name
 *    api_type      - API type; TCPIP or API is sent and expected as ws_name
 *
 *  Note the following requirements:
 *    1) host_name must match the configured adapter_name on the remote side
 *    2) adapter_name must match the configured host_name on the remote side
 *
 *  Change History
 *    1.00  Initial release shipped
 *    1.10  Changes for buffer allocation
 *    1.15  Changed for the 2.6 kernel; no longer compiles on 2.4 or lower
 *    1.25  Added packing support
 *    1.5
 */
#define KMSG_COMPONENT "claw"

#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>

#include "claw.h"
/*
 *  CLAW uses the s390dbf file system; see claw_trace and claw_setup
 */

static char version[] __initdata = "CLAW driver";
static char debug_buffer[255];
/*
 * Debug Facility Stuff
 */
static debug_info_t *claw_dbf_setup;
static debug_info_t *claw_dbf_trace;

/*
 * CLAW Debug Facility functions
 */
static void
claw_unregister_debug_facility(void)
{
	if (claw_dbf_setup)
		debug_unregister(claw_dbf_setup);
	if (claw_dbf_trace)
		debug_unregister(claw_dbf_trace);
}
static int
claw_register_debug_facility(void)
{
	claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
	claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
	if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
		claw_unregister_debug_facility();
		return -ENOMEM;
	}
	debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(claw_dbf_setup, 2);
	debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(claw_dbf_trace, 2);
	return 0;
}
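
/*
 * For reference: once registered as above, the two debug logs are normally
 * readable through the s390dbf debugfs views, e.g.
 *
 *	cat /sys/kernel/debug/s390dbf/claw_setup/hex_ascii
 *	cat /sys/kernel/debug/s390dbf/claw_trace/hex_ascii
 *
 * assuming debugfs is mounted at /sys/kernel/debug; the exact mount point
 * depends on the installation.
 */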
static inline void
claw_set_busy(struct net_device *dev)
{
	((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
}

static inline void
claw_clear_busy(struct net_device *dev)
{
	clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
	netif_wake_queue(dev);
}

static inline int
claw_check_busy(struct net_device *dev)
{
	return ((struct claw_privbk *) dev->ml_priv)->tbusy;
}

static inline void
claw_setbit_busy(int nr, struct net_device *dev)
{
	netif_stop_queue(dev);
	set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
}

static inline void
claw_clearbit_busy(int nr, struct net_device *dev)
{
	clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
	netif_wake_queue(dev);
}

static inline int
claw_test_and_setbit_busy(int nr, struct net_device *dev)
{
	netif_stop_queue(dev);
	return test_and_set_bit(nr,
		(void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
}
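
/*
 * Illustrative pattern (a sketch, assuming the TB_* bit numbers defined in
 * claw.h): the helpers above serialize a code path against the transmitter
 * by stopping the netif queue and claiming a bit in privptr->tbusy:
 *
 *	if (claw_test_and_setbit_busy(TB_TX, dev)) {
 *		// transmit path already busy: queue the skb and retry later
 *		return -EBUSY;
 *	}
 *	// ... build and start the write channel program ...
 *	claw_clearbit_busy(TB_TX, dev);	// also wakes the netif queue
 *
 * claw_hw_tx() below follows this pattern.
 */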
/* Functions for the DEV methods */

static int claw_probe(struct ccwgroup_device *cgdev);
static void claw_remove_device(struct ccwgroup_device *cgdev);
static void claw_purge_skb_queue(struct sk_buff_head *q);
static int claw_new_device(struct ccwgroup_device *cgdev);
static int claw_shutdown_device(struct ccwgroup_device *cgdev);
static int claw_tx(struct sk_buff *skb, struct net_device *dev);
static int claw_change_mtu(struct net_device *dev, int new_mtu);
static int claw_open(struct net_device *dev);
static void claw_irq_handler(struct ccw_device *cdev,
	unsigned long intparm, struct irb *irb);
static void claw_irq_tasklet(unsigned long data);
static int claw_release(struct net_device *dev);
static void claw_write_retry(struct chbk *p_ch);
static void claw_write_next(struct chbk *p_ch);
static void claw_timer(struct chbk *p_ch);

/* Functions */
static int add_claw_reads(struct net_device *dev,
	struct ccwbk *p_first, struct ccwbk *p_last);
static void ccw_check_return_code(struct ccw_device *cdev, int return_code);
static void ccw_check_unit_check(struct chbk *p_ch, unsigned char sense);
static int find_link(struct net_device *dev, char *host_name, char *ws_name);
static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
static int init_ccw_bk(struct net_device *dev);
static void probe_error(struct ccwgroup_device *cgdev);
static struct net_device_stats *claw_stats(struct net_device *dev);
static int pages_to_order_of_mag(int num_of_pages);
static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);

/* sysfs Functions */
static ssize_t claw_hname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_hname_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_adname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_adname_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_apname_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_apname_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_wbuff_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_wbuff_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);
static ssize_t claw_rbuff_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t claw_rbuff_write(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count);

/* Functions for System Validate */
static int claw_process_control(struct net_device *dev, struct ccwbk *p_ccw);
static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
	__u8 correlator, __u8 rc, char *local_name, char *remote_name);
static int claw_snd_conn_req(struct net_device *dev, __u8 link);
static int claw_snd_disc(struct net_device *dev, struct clawctl *p_ctl);
static int claw_snd_sys_validate_rsp(struct net_device *dev,
	struct clawctl *p_ctl, __u32 return_code);
static int claw_strt_conn_req(struct net_device *dev);
static void claw_strt_read(struct net_device *dev, int lock);
static void claw_strt_out_IO(struct net_device *dev);
static void claw_free_wrt_buf(struct net_device *dev);

/* Functions for unpack reads */
static void unpack_read(struct net_device *dev);
static int claw_pm_prepare(struct ccwgroup_device *gdev)
{
	return -EPERM;
}

/* the root device for claw group devices */
static struct device *claw_root_dev;
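
/*
 * Note: claw_pm_prepare() above unconditionally returns -EPERM, which makes
 * the power-management prepare stage fail; in effect, suspend/hibernate is
 * refused while a CLAW group device exists.
 */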
/* ccwgroup table */

static struct ccwgroup_driver claw_group_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "claw",
	},
	.setup	     = claw_probe,
	.remove      = claw_remove_device,
	.set_online  = claw_new_device,
	.set_offline = claw_shutdown_device,
	.prepare     = claw_pm_prepare,
};

static struct ccw_device_id claw_ids[] = {
	{CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
	{},
};
MODULE_DEVICE_TABLE(ccw, claw_ids);

static struct ccw_driver claw_ccw_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "claw",
	},
	.ids	= claw_ids,
	.probe	= ccwgroup_probe_ccwdev,
	.remove	= ccwgroup_remove_ccwdev,
	.int_class = IOINT_CLW,
};

static ssize_t claw_driver_group_store(struct device_driver *ddrv,
				       const char *buf, size_t count)
{
	int err;
	err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf);
	return err ? err : count;
}
static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);

static struct attribute *claw_group_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};

static struct attribute_group claw_group_attr_group = {
	.attrs = claw_group_attrs,
};

static const struct attribute_group *claw_group_attr_groups[] = {
	&claw_group_attr_group,
	NULL,
};
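
/*
 * Illustrative configuration sequence (hypothetical device numbers; the
 * attribute names follow the sysfs parameters listed in the header comment
 * and the usual ccwgroup sysfs layout):
 *
 *	echo 0.0.0200,0.0.0201 > /sys/bus/ccwgroup/drivers/claw/group
 *	echo LINUX390 > /sys/bus/ccwgroup/devices/0.0.0200/host_name
 *	echo RS6K     > /sys/bus/ccwgroup/devices/0.0.0200/adapter_name
 *	echo TCPIP    > /sys/bus/ccwgroup/devices/0.0.0200/api_type
 *	echo 1        > /sys/bus/ccwgroup/devices/0.0.0200/online
 *
 * Exact paths may differ depending on the installed sysfs layout.
 */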
/*
 *       Key functions
 */

/*-------------------------------------------------------------------*
 *   claw_tx                                                          *
 *-------------------------------------------------------------------*/
static int
claw_tx(struct sk_buff *skb, struct net_device *dev)
{
	int rc;
	struct claw_privbk *privptr = dev->ml_priv;
	unsigned long saveflags;
	struct chbk *p_ch;

	CLAW_DBF_TEXT(4, trace, "claw_tx");
	p_ch = &privptr->channel[WRITE_CHANNEL];
	spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
	rc = claw_hw_tx(skb, dev, 1);
	spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
	CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
	if (rc)
		rc = NETDEV_TX_BUSY;
	else
		rc = NETDEV_TX_OK;
	return rc;
}	/* end of claw_tx */
/*------------------------------------------------------------------*
 *  pack the collect queue into an skb and return it                 *
 *   If not packing just return the top skb from the queue           *
 *------------------------------------------------------------------*/
static struct sk_buff *
claw_pack_skb(struct claw_privbk *privptr)
{
	struct sk_buff *new_skb, *held_skb;
	struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
	struct claw_env *p_env = privptr->p_env;
	int pkt_cnt, pk_ind, so_far;

	new_skb = NULL;		/* assume no dice */
	pkt_cnt = 0;
	CLAW_DBF_TEXT(4, trace, "PackSKBe");
	if (!skb_queue_empty(&p_ch->collect_queue)) {
		/* some data */
		held_skb = skb_dequeue(&p_ch->collect_queue);
		if (held_skb)
			dev_kfree_skb_any(held_skb);
		else
			return NULL;
		if (p_env->packing != DO_PACKED)
			return held_skb;
		/* get a new SKB we will pack at least one */
		new_skb = dev_alloc_skb(p_env->write_size);
		if (new_skb == NULL) {
			atomic_inc(&held_skb->users);
			skb_queue_head(&p_ch->collect_queue, held_skb);
			return NULL;
		}
		/* we have packed packet and a place to put it */
		pk_ind = 1;
		so_far = 0;
		new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
		while ((pk_ind) && (held_skb != NULL)) {
			if (held_skb->len + so_far <= p_env->write_size - 8) {
				memcpy(skb_put(new_skb, held_skb->len),
					held_skb->data, held_skb->len);
				privptr->stats.tx_packets++;
				so_far += held_skb->len;
				pkt_cnt++;
				dev_kfree_skb_any(held_skb);
				held_skb = skb_dequeue(&p_ch->collect_queue);
				if (held_skb)
					atomic_dec(&held_skb->users);
			} else {
				pk_ind = 0;
				atomic_inc(&held_skb->users);
				skb_queue_head(&p_ch->collect_queue, held_skb);
			}
		}
	}
	CLAW_DBF_TEXT(4, trace, "PackSKBx");
	return new_skb;
}
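
/*
 * Sizing note (derived from the loop above): skbs from the collect queue
 * are merged only while held_skb->len + so_far <= write_size - 8, so one
 * packed buffer carries at most write_size - 8 bytes of payload (roughly
 * 32 KB minus 8 bytes with the 32k packing buffers mentioned in
 * init_ccw_bk()); the first skb that would overflow is pushed back onto
 * the collect queue for the next round.
 */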
/*-------------------------------------------------------------------*
 *   claw_change_mtu                                                  *
 *-------------------------------------------------------------------*/
static int
claw_change_mtu(struct net_device *dev, int new_mtu)
{
	struct claw_privbk *privptr = dev->ml_priv;
	int buff_size;

	CLAW_DBF_TEXT(4, trace, "setmtu");
	buff_size = privptr->p_env->write_size;
	if ((new_mtu < 60) || (new_mtu > buff_size))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}	/* end of claw_change_mtu */
/*-------------------------------------------------------------------*
 *   claw_open                                                        *
 *-------------------------------------------------------------------*/
static int
claw_open(struct net_device *dev)
{
	int rc;
	int i;
	unsigned long saveflags = 0;
	unsigned long parm;
	struct claw_privbk *privptr;
	DECLARE_WAITQUEUE(wait, current);
	struct timer_list timer;
	struct ccwbk *p_buf;

	CLAW_DBF_TEXT(4, trace, "open");
	privptr = (struct claw_privbk *)dev->ml_priv;
	/* allocate and initialize CCW blocks */
	if (privptr->buffs_alloc == 0) {
		rc = init_ccw_bk(dev);
		if (rc) {
			CLAW_DBF_TEXT(2, trace, "openmem");
			return -ENOMEM;
		}
	}
	privptr->system_validate_comp = 0;
	privptr->release_pend = 0;
	if (strncmp(privptr->p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0) {
		privptr->p_env->read_size = DEF_PACK_BUFSIZE;
		privptr->p_env->write_size = DEF_PACK_BUFSIZE;
		privptr->p_env->packing = PACKING_ASK;
	} else {
		privptr->p_env->packing = 0;
		privptr->p_env->read_size = CLAW_FRAME_SIZE;
		privptr->p_env->write_size = CLAW_FRAME_SIZE;
	}
	claw_set_busy(dev);
	tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
		(unsigned long) &privptr->channel[READ_CHANNEL]);
	for (i = 0; i < 2; i++) {
		CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
		init_waitqueue_head(&privptr->channel[i].wait);
		/* skb_queue_head_init(&p_ch->io_queue); */
		if (i == WRITE_CHANNEL)
			skb_queue_head_init(
				&privptr->channel[WRITE_CHANNEL].collect_queue);
		privptr->channel[i].flag_a = 0;
		privptr->channel[i].IO_active = 0;
		privptr->channel[i].flag &= ~CLAW_TIMER;
		init_timer(&timer);
		timer.function = (void *)claw_timer;
		timer.data = (unsigned long)(&privptr->channel[i]);
		timer.expires = jiffies + 15*HZ;
		add_timer(&timer);
		spin_lock_irqsave(get_ccwdev_lock(
			privptr->channel[i].cdev), saveflags);
		parm = (unsigned long) &privptr->channel[i];
		privptr->channel[i].claw_state = CLAW_START_HALT_IO;
		rc = 0;
		add_wait_queue(&privptr->channel[i].wait, &wait);
		rc = ccw_device_halt(
			(struct ccw_device *)privptr->channel[i].cdev, parm);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
		schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&privptr->channel[i].wait, &wait);
		if (rc != 0)
			ccw_check_return_code(privptr->channel[i].cdev, rc);
		if ((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
			del_timer(&timer);
	}
	if ((((privptr->channel[READ_CHANNEL].last_dstat |
		privptr->channel[WRITE_CHANNEL].last_dstat) &
		~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
	    (((privptr->channel[READ_CHANNEL].flag |
		privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
		dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
			"%s: remote side is not ready\n", dev->name);
		CLAW_DBF_TEXT(2, trace, "notrdy");
		for (i = 0; i < 2; i++) {
			spin_lock_irqsave(
				get_ccwdev_lock(privptr->channel[i].cdev),
				saveflags);
			parm = (unsigned long) &privptr->channel[i];
			privptr->channel[i].claw_state = CLAW_STOP;
			rc = ccw_device_halt(
				(struct ccw_device *)privptr->channel[i].cdev,
				parm);
			spin_unlock_irqrestore(
				get_ccwdev_lock(privptr->channel[i].cdev),
				saveflags);
			if (rc != 0) {
				ccw_check_return_code(
					privptr->channel[i].cdev, rc);
			}
		}
		free_pages((unsigned long)privptr->p_buff_ccw,
			(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
		if (privptr->p_env->read_size < PAGE_SIZE) {
			free_pages((unsigned long)privptr->p_buff_read,
				(int)pages_to_order_of_mag(
					privptr->p_buff_read_num));
		} else {
			p_buf = privptr->p_read_active_first;
			while (p_buf != NULL) {
				free_pages((unsigned long)p_buf->p_buffer,
					(int)pages_to_order_of_mag(
						privptr->p_buff_pages_perread));
				p_buf = p_buf->next;
			}
		}
		if (privptr->p_env->write_size < PAGE_SIZE) {
			free_pages((unsigned long)privptr->p_buff_write,
				(int)pages_to_order_of_mag(
					privptr->p_buff_write_num));
		} else {
			p_buf = privptr->p_write_active_first;
			while (p_buf != NULL) {
				free_pages((unsigned long)p_buf->p_buffer,
					(int)pages_to_order_of_mag(
						privptr->p_buff_pages_perwrite));
				p_buf = p_buf->next;
			}
		}
		privptr->buffs_alloc = 0;
		privptr->channel[READ_CHANNEL].flag = 0x00;
		privptr->channel[WRITE_CHANNEL].flag = 0x00;
		privptr->p_buff_ccw = NULL;
		privptr->p_buff_read = NULL;
		privptr->p_buff_write = NULL;
		claw_clear_busy(dev);
		CLAW_DBF_TEXT(2, trace, "open EIO");
		return -EIO;
	}

	/* Send SystemValidate command */

	claw_clear_busy(dev);
	CLAW_DBF_TEXT(4, trace, "openok");
	return 0;
}	/* end of claw_open */
/*-------------------------------------------------------------------*
 *   claw_irq_handler                                                 *
 *--------------------------------------------------------------------*/
static void
claw_irq_handler(struct ccw_device *cdev,
	unsigned long intparm, struct irb *irb)
{
	struct chbk *p_ch = NULL;
	struct claw_privbk *privptr = NULL;
	struct net_device *dev = NULL;
	struct claw_env *p_env;
	struct chbk *p_ch_r = NULL;

	CLAW_DBF_TEXT(4, trace, "clawirq");
	/* Bypass all 'unsolicited interrupts' */
	privptr = dev_get_drvdata(&cdev->dev);
	if (!privptr) {
		dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
			" IRQ, c-%02x d-%02x\n",
			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
		CLAW_DBF_TEXT(2, trace, "badirq");
		return;
	}

	/* Try to extract channel from driver data. */
	if (privptr->channel[READ_CHANNEL].cdev == cdev)
		p_ch = &privptr->channel[READ_CHANNEL];
	else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
		p_ch = &privptr->channel[WRITE_CHANNEL];
	else {
		dev_warn(&cdev->dev, "The device is not a CLAW device\n");
		CLAW_DBF_TEXT(2, trace, "badchan");
		return;
	}
	CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);

	dev = (struct net_device *) (p_ch->ndev);
	p_env = privptr->p_env;

	/* Copy interruption response block. */
	memcpy(p_ch->irb, irb, sizeof(struct irb));

	/* Check for good subchannel return code, otherwise info message */
	if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
		dev_info(&cdev->dev,
			"%s: subchannel check for device: %04x -"
			" Sch Stat %02x Dev Stat %02x CPA - %04x\n",
			dev->name, p_ch->devno,
			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
			irb->scsw.cmd.cpa);
		CLAW_DBF_TEXT(2, trace, "chanchk");
		/* return; */
	}

	/* Check the reason-code of a unit check */
	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
		ccw_check_unit_check(p_ch, irb->ecw[0]);

	/* State machine to bring the connection up, down and to restart */
	p_ch->last_dstat = irb->scsw.cmd.dstat;

	switch (p_ch->claw_state) {
	case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		(p_ch->irb->scsw.cmd.stctl ==
		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
			return;
		wake_up(&p_ch->wait);	/* wake up claw_release */
		CLAW_DBF_TEXT(4, trace, "stop");
		return;
	case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		(p_ch->irb->scsw.cmd.stctl ==
		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "haltio");
			return;
		}
		if (p_ch->flag == CLAW_READ) {
			p_ch->claw_state = CLAW_START_READ;
			wake_up(&p_ch->wait); /* wake claw_open (READ)*/
		} else if (p_ch->flag == CLAW_WRITE) {
			p_ch->claw_state = CLAW_START_WRITE;
			/* send SYSTEM_VALIDATE */
			claw_strt_read(dev, LOCK_NO);
			claw_send_control(dev,
				SYSTEM_VALIDATE_REQUEST,
				0, 0, 0,
				p_env->host_name,
				p_env->adapter_name);
		} else {
			dev_warn(&cdev->dev, "The CLAW device received"
				" an unexpected IRQ, "
				"c-%02x d-%02x\n",
				irb->scsw.cmd.cstat,
				irb->scsw.cmd.dstat);
			return;
		}
		CLAW_DBF_TEXT(4, trace, "haltio");
		return;
	case CLAW_START_READ:
		CLAW_DBF_TEXT(4, trace, "ReadIRQ");
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
			clear_bit(0, (void *)&p_ch->IO_active);
			if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
			    (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
			    (p_ch->irb->ecw[0]) == 0) {
				privptr->stats.rx_errors++;
				dev_info(&cdev->dev,
					"%s: Restart is required after remote "
					"side recovers \n",
					dev->name);
			}
			CLAW_DBF_TEXT(4, trace, "notrdy");
			return;
		}
		if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
		    (p_ch->irb->scsw.cmd.dstat == 0)) {
			if (test_and_set_bit(CLAW_BH_ACTIVE,
				(void *)&p_ch->flag_a) == 0)
				tasklet_schedule(&p_ch->tasklet);
			else
				CLAW_DBF_TEXT(4, trace, "PCINoBH");
			CLAW_DBF_TEXT(4, trace, "PCI_read");
			return;
		}
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		(p_ch->irb->scsw.cmd.stctl ==
		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "SPend_rd");
			return;
		}
		clear_bit(0, (void *)&p_ch->IO_active);
		claw_clearbit_busy(TB_RETRY, dev);
		if (test_and_set_bit(CLAW_BH_ACTIVE,
			(void *)&p_ch->flag_a) == 0)
			tasklet_schedule(&p_ch->tasklet);
		else
			CLAW_DBF_TEXT(4, trace, "RdBHAct");
		CLAW_DBF_TEXT(4, trace, "RdIRQXit");
		return;
	case CLAW_START_WRITE:
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
			dev_info(&cdev->dev,
				"%s: Unit Check Occurred in "
				"write channel\n", dev->name);
			clear_bit(0, (void *)&p_ch->IO_active);
			if (p_ch->irb->ecw[0] & 0x80) {
				dev_info(&cdev->dev,
					"%s: Resetting Event "
					"occurred:\n", dev->name);
				init_timer(&p_ch->timer);
				p_ch->timer.function =
					(void *)claw_write_retry;
				p_ch->timer.data = (unsigned long)p_ch;
				p_ch->timer.expires = jiffies + 10*HZ;
				add_timer(&p_ch->timer);
				dev_info(&cdev->dev,
					"%s: write connection "
					"restarting\n", dev->name);
			}
			CLAW_DBF_TEXT(4, trace, "rstrtwrt");
			return;
		}
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
			clear_bit(0, (void *)&p_ch->IO_active);
			dev_info(&cdev->dev,
				"%s: Unit Exception "
				"occurred in write channel\n",
				dev->name);
		}
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		(p_ch->irb->scsw.cmd.stctl ==
		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "writeUE");
			return;
		}
		clear_bit(0, (void *)&p_ch->IO_active);
		if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
			claw_write_next(p_ch);
			claw_clearbit_busy(TB_TX, dev);
			claw_clear_busy(dev);
		}
		p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
		if (test_and_set_bit(CLAW_BH_ACTIVE,
			(void *)&p_ch_r->flag_a) == 0)
			tasklet_schedule(&p_ch_r->tasklet);
		CLAW_DBF_TEXT(4, trace, "StWtExit");
		return;
	default:
		dev_warn(&cdev->dev,
			"The CLAW device for %s received an unexpected IRQ\n",
			dev->name);
		CLAW_DBF_TEXT(2, trace, "badIRQ");
		return;
	}

}	/* end of claw_irq_handler */
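
/*
 * Summary of the per-channel state machine handled above (claw_state
 * values, as used in this handler):
 *
 *   CLAW_STOP           halt I/O issued by claw_release(); the final status
 *                       interrupt wakes the waiter in claw_release().
 *   CLAW_START_HALT_IO  halt I/O issued by claw_open(); on completion the
 *                       read channel moves to CLAW_START_READ and the write
 *                       channel to CLAW_START_WRITE, which also starts the
 *                       SYSTEM_VALIDATE_REQUEST handshake.
 *   CLAW_START_READ     PCI interrupts and normal completions schedule the
 *                       read tasklet; unit checks count as rx_errors.
 *   CLAW_START_WRITE    completions free write buffers via claw_write_next()
 *                       and kick the read tasklet; a resetting-event unit
 *                       check arms the claw_write_retry() timer.
 */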
/*-------------------------------------------------------------------*
 *   claw_irq_tasklet                                                 *
 *--------------------------------------------------------------------*/
static void
claw_irq_tasklet(unsigned long data)
{
	struct chbk *p_ch;
	struct net_device *dev;

	p_ch = (struct chbk *) data;
	dev = (struct net_device *)p_ch->ndev;
	CLAW_DBF_TEXT(4, trace, "IRQtask");
	unpack_read(dev);
	clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
	CLAW_DBF_TEXT(4, trace, "TskletXt");
	return;
}	/* end of claw_irq_tasklet */
/*-------------------------------------------------------------------*
 *   claw_release                                                     *
 *--------------------------------------------------------------------*/
static int
claw_release(struct net_device *dev)
{
	int rc;
	int i;
	unsigned long saveflags;
	unsigned long parm;
	struct claw_privbk *privptr;
	DECLARE_WAITQUEUE(wait, current);
	struct ccwbk *p_this_ccw;
	struct ccwbk *p_buf;

	if (!dev)
		return 0;
	privptr = (struct claw_privbk *)dev->ml_priv;
	if (!privptr)
		return 0;
	CLAW_DBF_TEXT(4, trace, "release");
	privptr->release_pend = 1;
	claw_setbit_busy(TB_STOP, dev);
	for (i = 1; i >= 0; i--) {
		spin_lock_irqsave(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
		/* del_timer(&privptr->channel[READ_CHANNEL].timer); */
		privptr->channel[i].claw_state = CLAW_STOP;
		privptr->channel[i].IO_active = 0;
		parm = (unsigned long) &privptr->channel[i];
		if (i == WRITE_CHANNEL)
			claw_purge_skb_queue(
				&privptr->channel[WRITE_CHANNEL].collect_queue);
		rc = ccw_device_halt(privptr->channel[i].cdev, parm);
		if (privptr->system_validate_comp == 0x00)  /* never opened? */
			init_waitqueue_head(&privptr->channel[i].wait);
		add_wait_queue(&privptr->channel[i].wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
		schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&privptr->channel[i].wait, &wait);
		if (rc != 0) {
			ccw_check_return_code(privptr->channel[i].cdev, rc);
		}
	}
	if (privptr->pk_skb != NULL) {
		dev_kfree_skb_any(privptr->pk_skb);
		privptr->pk_skb = NULL;
	}
	if (privptr->buffs_alloc != 1) {
		CLAW_DBF_TEXT(4, trace, "none2fre");
		return 0;
	}
	CLAW_DBF_TEXT(4, trace, "freebufs");
	if (privptr->p_buff_ccw != NULL) {
		free_pages((unsigned long)privptr->p_buff_ccw,
			(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
	}
	CLAW_DBF_TEXT(4, trace, "freeread");
	if (privptr->p_env->read_size < PAGE_SIZE) {
		if (privptr->p_buff_read != NULL) {
			free_pages((unsigned long)privptr->p_buff_read,
				(int)pages_to_order_of_mag(
					privptr->p_buff_read_num));
		}
	} else {
		p_buf = privptr->p_read_active_first;
		while (p_buf != NULL) {
			free_pages((unsigned long)p_buf->p_buffer,
				(int)pages_to_order_of_mag(
					privptr->p_buff_pages_perread));
			p_buf = p_buf->next;
		}
	}
	CLAW_DBF_TEXT(4, trace, "freewrit");
	if (privptr->p_env->write_size < PAGE_SIZE) {
		free_pages((unsigned long)privptr->p_buff_write,
			(int)pages_to_order_of_mag(privptr->p_buff_write_num));
	} else {
		p_buf = privptr->p_write_active_first;
		while (p_buf != NULL) {
			free_pages((unsigned long)p_buf->p_buffer,
				(int)pages_to_order_of_mag(
					privptr->p_buff_pages_perwrite));
			p_buf = p_buf->next;
		}
	}
	CLAW_DBF_TEXT(4, trace, "clearptr");
	privptr->buffs_alloc = 0;
	privptr->p_buff_ccw = NULL;
	privptr->p_buff_read = NULL;
	privptr->p_buff_write = NULL;
	privptr->system_validate_comp = 0;
	privptr->release_pend = 0;
	/* Remove any writes that were pending and reset all reads */
	p_this_ccw = privptr->p_read_active_first;
	while (p_this_ccw != NULL) {
		p_this_ccw->header.length = 0xffff;
		p_this_ccw->header.opcode = 0xff;
		p_this_ccw->header.flag = 0x00;
		p_this_ccw = p_this_ccw->next;
	}

	while (privptr->p_write_active_first != NULL) {
		p_this_ccw = privptr->p_write_active_first;
		p_this_ccw->header.flag = CLAW_PENDING;
		privptr->p_write_active_first = p_this_ccw->next;
		p_this_ccw->next = privptr->p_write_free_chain;
		privptr->p_write_free_chain = p_this_ccw;
		++privptr->write_free_count;
	}
	privptr->p_write_active_last = NULL;
	privptr->mtc_logical_link = -1;
	privptr->mtc_skipping = 1;
	privptr->mtc_offset = 0;

	if (((privptr->channel[READ_CHANNEL].last_dstat |
		privptr->channel[WRITE_CHANNEL].last_dstat) &
		~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
		dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
			"Deactivating %s completed with incorrect"
			" subchannel status "
			"(read %02x, write %02x)\n",
			dev->name,
			privptr->channel[READ_CHANNEL].last_dstat,
			privptr->channel[WRITE_CHANNEL].last_dstat);
		CLAW_DBF_TEXT(2, trace, "badclose");
	}
	CLAW_DBF_TEXT(4, trace, "rlsexit");
	return 0;
}	/* end of claw_release */
/*-------------------------------------------------------------------*
 *   claw_write_retry                                                 *
 *--------------------------------------------------------------------*/
static void
claw_write_retry(struct chbk *p_ch)
{
	struct net_device *dev = p_ch->ndev;

	CLAW_DBF_TEXT(4, trace, "w_retry");
	if (p_ch->claw_state == CLAW_STOP) {
		return;
	}
	claw_strt_out_IO(dev);
	CLAW_DBF_TEXT(4, trace, "rtry_xit");
	return;
}	/* end of claw_write_retry */


/*-------------------------------------------------------------------*
 *   claw_write_next                                                  *
 *--------------------------------------------------------------------*/
static void
claw_write_next(struct chbk *p_ch)
{
	struct net_device *dev;
	struct claw_privbk *privptr = NULL;
	struct sk_buff *pk_skb;

	CLAW_DBF_TEXT(4, trace, "claw_wrt");
	if (p_ch->claw_state == CLAW_STOP)
		return;
	dev = (struct net_device *) p_ch->ndev;
	privptr = (struct claw_privbk *) dev->ml_priv;
	claw_free_wrt_buf(dev);
	if ((privptr->write_free_count > 0) &&
	    !skb_queue_empty(&p_ch->collect_queue)) {
		pk_skb = claw_pack_skb(privptr);
		while (pk_skb != NULL) {
			claw_hw_tx(pk_skb, dev, 1);
			if (privptr->write_free_count > 0) {
				pk_skb = claw_pack_skb(privptr);
			} else
				pk_skb = NULL;
		}
	}
	if (privptr->p_write_active_first != NULL) {
		claw_strt_out_IO(dev);
	}
	return;
}	/* end of claw_write_next */

/*-------------------------------------------------------------------*
 *   claw_timer                                                       *
 *--------------------------------------------------------------------*/
static void
claw_timer(struct chbk *p_ch)
{
	CLAW_DBF_TEXT(4, trace, "timer");
	p_ch->flag |= CLAW_TIMER;
	wake_up(&p_ch->wait);
	return;
}	/* end of claw_timer */
/*
 *       functions
 */

/*-------------------------------------------------------------------*
 *   pages_to_order_of_mag                                            *
 *                                                                    *
 *   takes a number of pages from 1 to 512 and returns the base-2     *
 *   order of magnitude needed by __get_free_pages(); the result is   *
 *   capped at the allocator's upper order of 9                       *
 *--------------------------------------------------------------------*/
static int
pages_to_order_of_mag(int num_of_pages)
{
	int order_of_mag = 1;		/* assume 2 pages */
	int nump;

	CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
	if (num_of_pages == 1)		/* magnitude of 0 = 1 page */
		return 0;
	/* 512 pages = 2Meg on 4k page systems */
	if (num_of_pages >= 512)
		return 9;
	/* we have two or more pages order is at least 1 */
	for (nump = 2; nump <= 512; nump *= 2) {
		if (num_of_pages <= nump)
			break;
		order_of_mag += 1;
	}
	if (order_of_mag > 9)
		order_of_mag = 9;	/* I know it's paranoid */
	CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
	return order_of_mag;
}
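
/*
 * Example mapping (follows directly from the loop above):
 *
 *   pages:  1   2   3-4   5-8   9-16   ...   257-512   >512
 *   order:  0   1    2     3     4     ...      9        9
 *
 * i.e. the returned order is the smallest n with 2^n >= num_of_pages,
 * capped at 9.
 */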
/*-------------------------------------------------------------------*
 *   add_claw_reads                                                   *
 *--------------------------------------------------------------------*/
static int
add_claw_reads(struct net_device *dev, struct ccwbk *p_first,
	struct ccwbk *p_last)
{
	struct claw_privbk *privptr;
	struct ccw1 temp_ccw;
	struct endccw *p_end;

	CLAW_DBF_TEXT(4, trace, "addreads");
	privptr = dev->ml_priv;
	p_end = privptr->p_end_ccw;

	/* first CCW and last CCW contain a new set of read channel programs
	 * to append to the running channel program
	 */
	if (p_first == NULL) {
		CLAW_DBF_TEXT(4, trace, "addexit");
		return 0;
	}

	/* set up ending CCW sequence for this segment */
	if (p_end->read1) {
		p_end->read1 = 0x00;	/* second ending CCW is now active */
		/* reset ending CCWs and setup TIC CCWs */
		p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
		p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
		p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read2_nop1);
		p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read2_nop1);
		p_end->read2_nop2.cda = 0;
		p_end->read2_nop2.count = 1;
	} else {
		p_end->read1 = 0x01;	/* first ending CCW is now active */
		/* reset ending CCWs and setup TIC CCWs */
		p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
		p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
		p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
		p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
		p_end->read1_nop2.cda = 0;
		p_end->read1_nop2.count = 1;
	}

	if (privptr->p_read_active_first == NULL) {
		privptr->p_read_active_first = p_first;	/* set new first */
		privptr->p_read_active_last = p_last;	/* set new last  */
	} else {

		/* set up TIC ccw */
		temp_ccw.cda = (__u32)__pa(&p_first->read);
		temp_ccw.count = 0;
		temp_ccw.flags = 0;
		temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;


		if (p_end->read1) {

			/* first set of CCW's is chained to the new read      */
			/* chain, so the second set is chained to the active  */
			/* chain.  Therefore modify the second set to point   */
			/* to the new read chain set up TIC CCWs.             */
			/* make sure we update the CCW so channel doesn't     */
			/* fetch it when it's only half done                  */
			memcpy(&p_end->read2_nop2, &temp_ccw,
				sizeof(struct ccw1));
			privptr->p_read_active_last->r_TIC_1.cda =
				(__u32)__pa(&p_first->read);
			privptr->p_read_active_last->r_TIC_2.cda =
				(__u32)__pa(&p_first->read);
		} else {
			/* make sure we update the CCW so channel doesn't */
			/* fetch it when it is only half done             */
			memcpy(&p_end->read1_nop2, &temp_ccw,
				sizeof(struct ccw1));
			privptr->p_read_active_last->r_TIC_1.cda =
				(__u32)__pa(&p_first->read);
			privptr->p_read_active_last->r_TIC_2.cda =
				(__u32)__pa(&p_first->read);
		}
		/* chain in new set of blocks */
		privptr->p_read_active_last->next = p_first;
		privptr->p_read_active_last = p_last;
	} /* end of if (privptr->p_read_active_first == NULL) */
	CLAW_DBF_TEXT(4, trace, "addexit");
	return 0;
}	/* end of add_claw_reads */
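
/*
 * In short: the ending CCW sequence alternates between the read1 and read2
 * NOP/READFF pairs in the endccw block.  The pair that terminates the
 * currently running program is overwritten with a TIC to the new sub-chain
 * in a single memcpy(), so the channel never fetches a half-updated CCW and
 * the read program is extended while it keeps running.
 */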
/*-------------------------------------------------------------------*
 *   ccw_check_return_code                                            *
 *-------------------------------------------------------------------*/
static void
ccw_check_return_code(struct ccw_device *cdev, int return_code)
{
	CLAW_DBF_TEXT(4, trace, "ccwret");
	if (return_code != 0) {
		switch (return_code) {
		case -EBUSY: /* BUSY is a transient state no action needed */
			break;
		case -ENODEV:
			dev_err(&cdev->dev, "The remote channel adapter is not"
				" available\n");
			break;
		case -EINVAL:
			dev_err(&cdev->dev,
				"The status of the remote channel adapter"
				" is not valid\n");
			break;
		default:
			dev_err(&cdev->dev, "The common device layer"
				" returned error code %d\n",
				return_code);
		}
	}
	CLAW_DBF_TEXT(4, trace, "ccwret");
}	/* end of ccw_check_return_code */
/*-------------------------------------------------------------------*
 *   ccw_check_unit_check                                             *
 *--------------------------------------------------------------------*/
static void
ccw_check_unit_check(struct chbk *p_ch, unsigned char sense)
{
	struct net_device *ndev = p_ch->ndev;
	struct device *dev = &p_ch->cdev->dev;

	CLAW_DBF_TEXT(4, trace, "unitchek");
	dev_warn(dev, "The communication peer of %s disconnected\n",
		ndev->name);

	if (sense & 0x40) {
		if (sense & 0x01) {
			dev_warn(dev, "The remote channel adapter for"
				" %s has been reset\n",
				ndev->name);
		}
	} else if (sense & 0x20) {
		if (sense & 0x04) {
			dev_warn(dev, "A data streaming timeout occurred"
				" for %s\n",
				ndev->name);
		} else if (sense & 0x10) {
			dev_warn(dev, "The remote channel adapter for %s"
				" is faulty\n",
				ndev->name);
		} else {
			dev_warn(dev, "A data transfer parity error occurred"
				" for %s\n",
				ndev->name);
		}
	} else if (sense & 0x10) {
		dev_warn(dev, "A read data parity error occurred"
			" for %s\n",
			ndev->name);
	}
}	/* end of ccw_check_unit_check */
/*-------------------------------------------------------------------*
 *   find_link                                                        *
 *--------------------------------------------------------------------*/
static int
find_link(struct net_device *dev, char *host_name, char *ws_name)
{
	struct claw_privbk *privptr;
	struct claw_env *p_env;
	int rc = 0;

	CLAW_DBF_TEXT(2, setup, "findlink");
	privptr = dev->ml_priv;
	p_env = privptr->p_env;
	switch (p_env->packing) {
	case PACKING_ASK:
		if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8) != 0) ||
		    (memcmp(WS_APPL_NAME_PACKED, ws_name, 8) != 0))
			rc = EINVAL;
		break;
	case DO_PACKED:
	case PACK_SEND:
		if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8) != 0) ||
		    (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8) != 0))
			rc = EINVAL;
		break;
	default:
		if ((memcmp(HOST_APPL_NAME, host_name, 8) != 0) ||
		    (memcmp(p_env->api_type, ws_name, 8) != 0))
			rc = EINVAL;
		break;
	}

	return rc;
}	/* end of find_link */
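
/*
 * Name matching per packing mode, as coded above:
 *
 *   PACKING_ASK            both names must be WS_APPL_NAME_PACKED
 *   DO_PACKED / PACK_SEND  both names must be WS_APPL_NAME_IP_NAME
 *   default (unpacked)     host_name must be HOST_APPL_NAME and ws_name
 *                          must match the configured api_type
 *
 * A mismatch sets rc to EINVAL (note: a positive value).
 */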
1236 /*-------------------------------------------------------------------*
1237 * claw_hw_tx *
1240 *-------------------------------------------------------------------*/
1242 static int
1243 claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1245 int rc=0;
1246 struct claw_privbk *privptr;
1247 struct ccwbk *p_this_ccw;
1248 struct ccwbk *p_first_ccw;
1249 struct ccwbk *p_last_ccw;
1250 __u32 numBuffers;
1251 signed long len_of_data;
1252 unsigned long bytesInThisBuffer;
1253 unsigned char *pDataAddress;
1254 struct endccw *pEnd;
1255 struct ccw1 tempCCW;
1256 struct claw_env *p_env;
1257 struct clawph *pk_head;
1258 struct chbk *ch;
1260 CLAW_DBF_TEXT(4, trace, "hw_tx");
1261 privptr = (struct claw_privbk *)(dev->ml_priv);
1262 p_env =privptr->p_env;
1263 claw_free_wrt_buf(dev); /* Clean up free chain if posible */
1264 /* scan the write queue to free any completed write packets */
1265 p_first_ccw=NULL;
1266 p_last_ccw=NULL;
1267 if ((p_env->packing >= PACK_SEND) &&
1268 (skb->cb[1] != 'P')) {
1269 skb_push(skb,sizeof(struct clawph));
1270 pk_head=(struct clawph *)skb->data;
1271 pk_head->len=skb->len-sizeof(struct clawph);
1272 if (pk_head->len%4) {
1273 pk_head->len+= 4-(pk_head->len%4);
1274 skb_pad(skb,4-(pk_head->len%4));
1275 skb_put(skb,4-(pk_head->len%4));
1277 if (p_env->packing == DO_PACKED)
1278 pk_head->link_num = linkid;
1279 else
1280 pk_head->link_num = 0;
1281 pk_head->flag = 0x00;
1282 skb_pad(skb,4);
1283 skb->cb[1] = 'P';
1285 if (linkid == 0) {
1286 if (claw_check_busy(dev)) {
1287 if (privptr->write_free_count!=0) {
1288 claw_clear_busy(dev);
1290 else {
1291 claw_strt_out_IO(dev );
1292 claw_free_wrt_buf( dev );
1293 if (privptr->write_free_count==0) {
1294 ch = &privptr->channel[WRITE_CHANNEL];
1295 atomic_inc(&skb->users);
1296 skb_queue_tail(&ch->collect_queue, skb);
1297 goto Done;
1299 else {
1300 claw_clear_busy(dev);
1304 /* tx lock */
1305 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1306 ch = &privptr->channel[WRITE_CHANNEL];
1307 atomic_inc(&skb->users);
1308 skb_queue_tail(&ch->collect_queue, skb);
1309 claw_strt_out_IO(dev );
1310 rc=-EBUSY;
1311 goto Done2;
1314 /* See how many write buffers are required to hold this data */
1315 numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
1317 /* If that number of buffers isn't available, give up for now */
1318 if (privptr->write_free_count < numBuffers ||
1319 privptr->p_write_free_chain == NULL ) {
1321 claw_setbit_busy(TB_NOBUFFER,dev);
1322 ch = &privptr->channel[WRITE_CHANNEL];
1323 atomic_inc(&skb->users);
1324 skb_queue_tail(&ch->collect_queue, skb);
1325 CLAW_DBF_TEXT(2, trace, "clawbusy");
1326 goto Done2;
1328 pDataAddress=skb->data;
1329 len_of_data=skb->len;
1331 while (len_of_data > 0) {
1332 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1333 if (p_this_ccw == NULL) { /* lost the race */
1334 ch = &privptr->channel[WRITE_CHANNEL];
1335 atomic_inc(&skb->users);
1336 skb_queue_tail(&ch->collect_queue, skb);
1337 goto Done2;
1339 privptr->p_write_free_chain=p_this_ccw->next;
1340 p_this_ccw->next=NULL;
1341 --privptr->write_free_count; /* -1 */
1342 if (len_of_data >= privptr->p_env->write_size)
1343 bytesInThisBuffer = privptr->p_env->write_size;
1344 else
1345 bytesInThisBuffer = len_of_data;
1346 memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
1347 len_of_data-=bytesInThisBuffer;
1348 pDataAddress+=(unsigned long)bytesInThisBuffer;
1349 /* setup write CCW */
1350 p_this_ccw->write.cmd_code = (linkid * 8) +1;
1351 if (len_of_data>0) {
1352 p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
1354 p_this_ccw->write.count=bytesInThisBuffer;
1355 /* now add to end of this chain */
1356 if (p_first_ccw==NULL) {
1357 p_first_ccw=p_this_ccw;
1359 if (p_last_ccw!=NULL) {
1360 p_last_ccw->next=p_this_ccw;
1361 /* set up TIC ccws */
1362 p_last_ccw->w_TIC_1.cda=
1363 (__u32)__pa(&p_this_ccw->write);
1365 p_last_ccw=p_this_ccw; /* save new last block */
1368 /* FirstCCW and LastCCW now contain a new set of write channel
1369 * programs to append to the running channel program
1372 if (p_first_ccw!=NULL) {
1373 /* setup ending ccw sequence for this segment */
1374 pEnd=privptr->p_end_ccw;
1375 if (pEnd->write1) {
1376 pEnd->write1=0x00; /* second end ccw is now active */
1377 /* set up Tic CCWs */
1378 p_last_ccw->w_TIC_1.cda=
1379 (__u32)__pa(&pEnd->write2_nop1);
1380 pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1381 pEnd->write2_nop2.flags =
1382 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1383 pEnd->write2_nop2.cda=0;
1384 pEnd->write2_nop2.count=1;
1386 else { /* end of if (pEnd->write1)*/
1387 pEnd->write1=0x01; /* first end ccw is now active */
1388 /* set up Tic CCWs */
1389 p_last_ccw->w_TIC_1.cda=
1390 (__u32)__pa(&pEnd->write1_nop1);
1391 pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1392 pEnd->write1_nop2.flags =
1393 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1394 pEnd->write1_nop2.cda=0;
1395 pEnd->write1_nop2.count=1;
1396 } /* end if if (pEnd->write1) */
1398 if (privptr->p_write_active_first==NULL ) {
1399 privptr->p_write_active_first=p_first_ccw;
1400 privptr->p_write_active_last=p_last_ccw;
1402 else {
1403 /* set up Tic CCWs */
1405 tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
1406 tempCCW.count=0;
1407 tempCCW.flags=0;
1408 tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
1410 if (pEnd->write1) {
1413 * first set of ending CCW's is chained to the new write
1414 * chain, so the second set is chained to the active chain
1415 * Therefore modify the second set to point the new write chain.
1416 * make sure we update the CCW atomically
1417 * so channel does not fetch it when it's only half done
1419 memcpy( &pEnd->write2_nop2, &tempCCW ,
1420 sizeof(struct ccw1));
1421 privptr->p_write_active_last->w_TIC_1.cda=
1422 (__u32)__pa(&p_first_ccw->write);
1424 else {
1426 /*make sure we update the CCW atomically
1427 *so channel does not fetch it when it's only half done
1429 memcpy(&pEnd->write1_nop2, &tempCCW ,
1430 sizeof(struct ccw1));
1431 privptr->p_write_active_last->w_TIC_1.cda=
1432 (__u32)__pa(&p_first_ccw->write);
1434 } /* end if if (pEnd->write1) */
1436 privptr->p_write_active_last->next=p_first_ccw;
1437 privptr->p_write_active_last=p_last_ccw;
1440 } /* endif (p_first_ccw!=NULL) */
1441 dev_kfree_skb_any(skb);
1442 claw_strt_out_IO(dev );
1443 /* if write free count is zero , set NOBUFFER */
1444 if (privptr->write_free_count==0) {
1445 claw_setbit_busy(TB_NOBUFFER,dev);
1447 Done2:
1448 claw_clearbit_busy(TB_TX,dev);
1449 Done:
1450 return(rc);
1451 } /* end of claw_hw_tx */
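
/*
 * Worked example for the buffer-count check in claw_hw_tx() above
 * (hypothetical numbers): with write_size = 4096 and an skb of 9000 bytes,
 * numBuffers = DIV_ROUND_UP(9000, 4096) = 3, so three free write buffers
 * are needed; with fewer free buffers the skb is parked on the collect
 * queue and TB_NOBUFFER is set instead.
 */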
1453 /*-------------------------------------------------------------------*
1455 * init_ccw_bk *
1457 *--------------------------------------------------------------------*/
1459 static int
1460 init_ccw_bk(struct net_device *dev)
1463 __u32 ccw_blocks_required;
1464 __u32 ccw_blocks_perpage;
1465 __u32 ccw_pages_required;
1466 __u32 claw_reads_perpage=1;
1467 __u32 claw_read_pages;
1468 __u32 claw_writes_perpage=1;
1469 __u32 claw_write_pages;
1470 void *p_buff=NULL;
1471 struct ccwbk*p_free_chain;
1472 struct ccwbk*p_buf;
1473 struct ccwbk*p_last_CCWB;
1474 struct ccwbk*p_first_CCWB;
1475 struct endccw *p_endccw=NULL;
1476 addr_t real_address;
1477 struct claw_privbk *privptr = dev->ml_priv;
1478 struct clawh *pClawH=NULL;
1479 addr_t real_TIC_address;
1480 int i,j;
1481 CLAW_DBF_TEXT(4, trace, "init_ccw");
1483 /* initialize statistics field */
1484 privptr->active_link_ID=0;
1485 /* initialize ccwbk pointers */
1486 privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
1487 privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
1488 privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
1489 privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
1490 privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
1491 privptr->p_end_ccw=NULL; /* pointer to ending ccw */
1492 privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
1493 privptr->buffs_alloc = 0;
1494 memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
1495 memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
1496 /* initialize free write ccwbk counter */
1497 privptr->write_free_count=0; /* number of free bufs on write chain */
1498 p_last_CCWB = NULL;
1499 p_first_CCWB= NULL;
1501 * We need 1 CCW block for each read buffer, 1 for each
1502 * write buffer, plus 1 for ClawSignalBlock
1504 ccw_blocks_required =
1505 privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
1507 * compute number of CCW blocks that will fit in a page
1509 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
1510 ccw_pages_required=
1511 DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
1514 * read and write sizes are set by 2 constants in claw.h
1515 * 4k and 32k. Unpacked values other than 4k are not going to
1516 * provide good performance. With packing buffers support 32k
1517 * buffers are used.
1519 if (privptr->p_env->read_size < PAGE_SIZE) {
1520 claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
1521 claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
1522 claw_reads_perpage);
1524 else { /* > or equal */
1525 privptr->p_buff_pages_perread =
1526 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1527 claw_read_pages = privptr->p_env->read_buffers *
1528 privptr->p_buff_pages_perread;
1530 if (privptr->p_env->write_size < PAGE_SIZE) {
1531 claw_writes_perpage =
1532 PAGE_SIZE / privptr->p_env->write_size;
1533 claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
1534 claw_writes_perpage);
1537 else { /* > or equal */
1538 privptr->p_buff_pages_perwrite =
1539 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1540 claw_write_pages = privptr->p_env->write_buffers *
1541 privptr->p_buff_pages_perwrite;
1544 * allocate ccw_pages_required
1546 if (privptr->p_buff_ccw==NULL) {
1547 privptr->p_buff_ccw=
1548 (void *)__get_free_pages(__GFP_DMA,
1549 (int)pages_to_order_of_mag(ccw_pages_required ));
1550 if (privptr->p_buff_ccw==NULL) {
1551 return -ENOMEM;
1553 privptr->p_buff_ccw_num=ccw_pages_required;
1555 memset(privptr->p_buff_ccw, 0x00,
1556 privptr->p_buff_ccw_num * PAGE_SIZE);
1559 * obtain ending ccw block address
1562 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
1563 real_address = (__u32)__pa(privptr->p_end_ccw);
1564 /* Initialize ending CCW block */
1565 p_endccw=privptr->p_end_ccw;
1566 p_endccw->real=real_address;
1567 p_endccw->write1=0x00;
1568 p_endccw->read1=0x00;
1570 /* write1_nop1 */
1571 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1572 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1573 p_endccw->write1_nop1.count = 1;
1574 p_endccw->write1_nop1.cda = 0;
1576 /* write1_nop2 */
1577 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1578 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1579 p_endccw->write1_nop2.count = 1;
1580 p_endccw->write1_nop2.cda = 0;
1582 /* write2_nop1 */
1583 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1584 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1585 p_endccw->write2_nop1.count = 1;
1586 p_endccw->write2_nop1.cda = 0;
1588 /* write2_nop2 */
1589 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1590 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1591 p_endccw->write2_nop2.count = 1;
1592 p_endccw->write2_nop2.cda = 0;
1594 /* read1_nop1 */
1595 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1596 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1597 p_endccw->read1_nop1.count = 1;
1598 p_endccw->read1_nop1.cda = 0;
1600 /* read1_nop2 */
1601 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1602 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1603 p_endccw->read1_nop2.count = 1;
1604 p_endccw->read1_nop2.cda = 0;
1606 /* read2_nop1 */
1607 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1608 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1609 p_endccw->read2_nop1.count = 1;
1610 p_endccw->read2_nop1.cda = 0;
1612 /* read2_nop2 */
1613 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1614 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1615 p_endccw->read2_nop2.count = 1;
1616 p_endccw->read2_nop2.cda = 0;
1619 * Build a chain of CCWs
1622 p_buff=privptr->p_buff_ccw;
1624 p_free_chain=NULL;
1625 for (i=0 ; i < ccw_pages_required; i++ ) {
1626 real_address = (__u32)__pa(p_buff);
1627 p_buf=p_buff;
1628 for (j=0 ; j < ccw_blocks_perpage ; j++) {
1629 p_buf->next = p_free_chain;
1630 p_free_chain = p_buf;
1631 p_buf->real=(__u32)__pa(p_buf);
1632 ++p_buf;
1634 p_buff+=PAGE_SIZE;
1637 * Initialize ClawSignalBlock
1640 if (privptr->p_claw_signal_blk==NULL) {
1641 privptr->p_claw_signal_blk=p_free_chain;
1642 p_free_chain=p_free_chain->next;
1643 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
1644 pClawH->length=0xffff;
1645 pClawH->opcode=0xff;
1646 pClawH->flag=CLAW_BUSY;
1650 * allocate write_pages_required and add to free chain
1652 if (privptr->p_buff_write==NULL) {
1653 if (privptr->p_env->write_size < PAGE_SIZE) {
1654 privptr->p_buff_write=
1655 (void *)__get_free_pages(__GFP_DMA,
1656 (int)pages_to_order_of_mag(claw_write_pages ));
1657 if (privptr->p_buff_write==NULL) {
1658 privptr->p_buff_ccw=NULL;
1659 return -ENOMEM;
1662 * Build CLAW write free chain
1666 memset(privptr->p_buff_write, 0x00,
1667 ccw_pages_required * PAGE_SIZE);
1668 privptr->p_write_free_chain=NULL;
1670 p_buff=privptr->p_buff_write;
1672 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
1673 p_buf = p_free_chain; /* get a CCW */
1674 p_free_chain = p_buf->next;
1675 p_buf->next =privptr->p_write_free_chain;
1676 privptr->p_write_free_chain = p_buf;
1677 p_buf-> p_buffer = (struct clawbuf *)p_buff;
1678 p_buf-> write.cda = (__u32)__pa(p_buff);
1679 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1680 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1681 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1682 p_buf-> w_read_FF.count = 1;
1683 p_buf-> w_read_FF.cda =
1684 (__u32)__pa(&p_buf-> header.flag);
1685 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1686 p_buf-> w_TIC_1.flags = 0;
1687 p_buf-> w_TIC_1.count = 0;
1689 if (((unsigned long)p_buff +
1690 privptr->p_env->write_size) >=
1691 ((unsigned long)(p_buff+2*
1692 (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
1693 p_buff = p_buff+privptr->p_env->write_size;
1697 else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */
1699 privptr->p_write_free_chain=NULL;
1700 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
1701 p_buff=(void *)__get_free_pages(__GFP_DMA,
1702 (int)pages_to_order_of_mag(
1703 privptr->p_buff_pages_perwrite) );
1704 if (p_buff==NULL) {
1705 free_pages((unsigned long)privptr->p_buff_ccw,
1706 (int)pages_to_order_of_mag(
1707 privptr->p_buff_ccw_num));
1708 privptr->p_buff_ccw=NULL;
1709 p_buf=privptr->p_buff_write;
1710 while (p_buf!=NULL) {
1711 free_pages((unsigned long)
1712 p_buf->p_buffer,
1713 (int)pages_to_order_of_mag(
1714 privptr->p_buff_pages_perwrite));
1715 p_buf=p_buf->next;
1717 return -ENOMEM;
1718 } /* Error on get_pages */
1719 memset(p_buff, 0x00, privptr->p_env->write_size );
1720 p_buf = p_free_chain;
1721 p_free_chain = p_buf->next;
1722 p_buf->next = privptr->p_write_free_chain;
1723 privptr->p_write_free_chain = p_buf;
1724 privptr->p_buff_write = p_buf;
1725 p_buf->p_buffer=(struct clawbuf *)p_buff;
1726 p_buf-> write.cda = (__u32)__pa(p_buff);
1727 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1728 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1729 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1730 p_buf-> w_read_FF.count = 1;
1731 p_buf-> w_read_FF.cda =
1732 (__u32)__pa(&p_buf-> header.flag);
1733 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1734 p_buf-> w_TIC_1.flags = 0;
1735 p_buf-> w_TIC_1.count = 0;
1736 } /* for all write_buffers */
1738 } /* else buffers are PAGE_SIZE or bigger */
1741 privptr->p_buff_write_num=claw_write_pages;
1742 privptr->write_free_count=privptr->p_env->write_buffers;
1746 * allocate read_pages_required and chain to free chain
1748 if (privptr->p_buff_read==NULL) {
1749 if (privptr->p_env->read_size < PAGE_SIZE) {
1750 privptr->p_buff_read=
1751 (void *)__get_free_pages(__GFP_DMA,
1752 (int)pages_to_order_of_mag(claw_read_pages) );
1753 if (privptr->p_buff_read==NULL) {
1754 free_pages((unsigned long)privptr->p_buff_ccw,
1755 (int)pages_to_order_of_mag(
1756 privptr->p_buff_ccw_num));
1757 /* free the write pages size is < page size */
1758 free_pages((unsigned long)privptr->p_buff_write,
1759 (int)pages_to_order_of_mag(
1760 privptr->p_buff_write_num));
1761 privptr->p_buff_ccw=NULL;
1762 privptr->p_buff_write=NULL;
1763 return -ENOMEM;
1765 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
1766 privptr->p_buff_read_num=claw_read_pages;
1768 * Build CLAW read free chain
1771 p_buff=privptr->p_buff_read;
1772 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1773 p_buf = p_free_chain;
1774 p_free_chain = p_buf->next;
1776 if (p_last_CCWB==NULL) {
1777 p_buf->next=NULL;
1778 real_TIC_address=0;
1779 p_last_CCWB=p_buf;
1781 else {
1782 p_buf->next=p_first_CCWB;
1783 real_TIC_address=
1784 (__u32)__pa(&p_first_CCWB -> read );
1787 p_first_CCWB=p_buf;
1789 p_buf->p_buffer=(struct clawbuf *)p_buff;
1790 /* initialize read command */
1791 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1792 p_buf-> read.cda = (__u32)__pa(p_buff);
1793 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1794 p_buf-> read.count = privptr->p_env->read_size;
1796 /* initialize read_h command */
1797 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1798 p_buf-> read_h.cda =
1799 (__u32)__pa(&(p_buf->header));
1800 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1801 p_buf-> read_h.count = sizeof(struct clawh);
1803 /* initialize Signal command */
1804 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1805 p_buf-> signal.cda =
1806 (__u32)__pa(&(pClawH->flag));
1807 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1808 p_buf-> signal.count = 1;
1810 /* initialize r_TIC_1 command */
1811 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1812 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1813 p_buf-> r_TIC_1.flags = 0;
1814 p_buf-> r_TIC_1.count = 0;
1816 /* initialize r_read_FF command */
1817 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1818 p_buf-> r_read_FF.cda =
1819 (__u32)__pa(&(pClawH->flag));
1820 p_buf-> r_read_FF.flags =
1821 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1822 p_buf-> r_read_FF.count = 1;
1824 /* initialize r_TIC_2 */
1825 memcpy(&p_buf->r_TIC_2,
1826 &p_buf->r_TIC_1, sizeof(struct ccw1));
1828 /* initialize Header */
1829 p_buf->header.length=0xffff;
1830 p_buf->header.opcode=0xff;
1831 p_buf->header.flag=CLAW_PENDING;
1833 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
1834 ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
1835 -1)
1836 & PAGE_MASK)) {
1837 p_buff= p_buff+privptr->p_env->read_size;
1838 }
1839 else {
1840 p_buff=
1841 (void *)((unsigned long)
1842 (p_buff+2*(privptr->p_env->read_size)-1)
1843 & PAGE_MASK) ;
1844 }
1845 } /* for read_buffers */
1846 } /* read_size < PAGE_SIZE */
1847 else { /* read Size >= PAGE_SIZE */
1848 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1849 p_buff = (void *)__get_free_pages(__GFP_DMA,
1850 (int)pages_to_order_of_mag(
1851 privptr->p_buff_pages_perread));
1852 if (p_buff==NULL) {
1853 free_pages((unsigned long)privptr->p_buff_ccw,
1854 (int)pages_to_order_of_mag(privptr->
1855 p_buff_ccw_num));
1856 /* free the write pages */
1857 p_buf=privptr->p_buff_write;
1858 while (p_buf!=NULL) {
1859 free_pages(
1860 (unsigned long)p_buf->p_buffer,
1861 (int)pages_to_order_of_mag(
1862 privptr->p_buff_pages_perwrite));
1863 p_buf=p_buf->next;
1865 /* free any read pages already alloc */
1866 p_buf=privptr->p_buff_read;
1867 while (p_buf!=NULL) {
1868 free_pages(
1869 (unsigned long)p_buf->p_buffer,
1870 (int)pages_to_order_of_mag(
1871 privptr->p_buff_pages_perread));
1872 p_buf=p_buf->next;
1874 privptr->p_buff_ccw=NULL;
1875 privptr->p_buff_write=NULL;
1876 return -ENOMEM;
1878 memset(p_buff, 0x00, privptr->p_env->read_size);
1879 p_buf = p_free_chain;
1880 privptr->p_buff_read = p_buf;
1881 p_free_chain = p_buf->next;
1883 if (p_last_CCWB==NULL) {
1884 p_buf->next=NULL;
1885 real_TIC_address=0;
1886 p_last_CCWB=p_buf;
1888 else {
1889 p_buf->next=p_first_CCWB;
1890 real_TIC_address=
1891 (addr_t)__pa(
1892 &p_first_CCWB -> read );
1895 p_first_CCWB=p_buf;
1896 /* save buff address */
1897 p_buf->p_buffer=(struct clawbuf *)p_buff;
1898 /* initialize read command */
1899 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1900 p_buf-> read.cda = (__u32)__pa(p_buff);
1901 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1902 p_buf-> read.count = privptr->p_env->read_size;
1904 /* initialize read_h command */
1905 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1906 p_buf-> read_h.cda =
1907 (__u32)__pa(&(p_buf->header));
1908 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1909 p_buf-> read_h.count = sizeof(struct clawh);
1911 /* initialize Signal command */
1912 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1913 p_buf-> signal.cda =
1914 (__u32)__pa(&(pClawH->flag));
1915 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1916 p_buf-> signal.count = 1;
1918 /* initialize r_TIC_1 command */
1919 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1920 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1921 p_buf-> r_TIC_1.flags = 0;
1922 p_buf-> r_TIC_1.count = 0;
1924 /* initialize r_read_FF command */
1925 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1926 p_buf-> r_read_FF.cda =
1927 (__u32)__pa(&(pClawH->flag));
1928 p_buf-> r_read_FF.flags =
1929 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1930 p_buf-> r_read_FF.count = 1;
1932 /* initialize r_TIC_2 */
1933 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
1934 sizeof(struct ccw1));
1936 /* initialize Header */
1937 p_buf->header.length=0xffff;
1938 p_buf->header.opcode=0xff;
1939 p_buf->header.flag=CLAW_PENDING;
1941 } /* For read_buffers */
1942 } /* read_size >= PAGE_SIZE */
1943 } /* pBuffread = NULL */
1944 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
1945 privptr->buffs_alloc = 1;
1947 return 0;
1948 } /* end of init_ccw_bk */
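/*
 * Note on the chains built above: every write buffer is fronted by a
 * small channel program (write, w_read_FF, w_TIC_1) and every read
 * buffer by read, read_h, signal, r_TIC_1, r_read_FF and r_TIC_2 CCWs.
 * The r_TIC CCWs take their data address from real_TIC_address, i.e.
 * the read CCW of the block chained in front of this one, so the
 * individual channel programs end up linked together; the completed
 * chain is then handed to add_claw_reads().
 */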
1950 /*-------------------------------------------------------------------*
1952 * probe_error *
1954 *--------------------------------------------------------------------*/
1956 static void
1957 probe_error( struct ccwgroup_device *cgdev)
1958 {
1959 struct claw_privbk *privptr;
1961 CLAW_DBF_TEXT(4, trace, "proberr");
1962 privptr = dev_get_drvdata(&cgdev->dev);
1963 if (privptr != NULL) {
1964 dev_set_drvdata(&cgdev->dev, NULL);
1965 kfree(privptr->p_env);
1966 kfree(privptr->p_mtc_envelope);
1967 kfree(privptr);
1968 }
1969 } /* probe_error */
1971 /*-------------------------------------------------------------------*
1972 * claw_process_control *
1975 *--------------------------------------------------------------------*/
1977 static int
1978 claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
1979 {
1981 struct clawbuf *p_buf;
1982 struct clawctl ctlbk;
1983 struct clawctl *p_ctlbk;
1984 char temp_host_name[8];
1985 char temp_ws_name[8];
1986 struct claw_privbk *privptr;
1987 struct claw_env *p_env;
1988 struct sysval *p_sysval;
1989 struct conncmd *p_connect=NULL;
1990 int rc;
1991 struct chbk *p_ch = NULL;
1992 struct device *tdev;
1993 CLAW_DBF_TEXT(2, setup, "clw_cntl");
1994 udelay(1000); /* Wait a ms for the control packets to
1995 *catch up to each other */
1996 privptr = dev->ml_priv;
1997 p_env=privptr->p_env;
1998 tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
1999 memcpy( &temp_host_name, p_env->host_name, 8);
2000 memcpy( &temp_ws_name, p_env->adapter_name , 8);
2001 dev_info(tdev, "%s: CLAW device %.8s: "
2002 "Received Control Packet\n",
2003 dev->name, temp_ws_name);
2004 if (privptr->release_pend==1) {
2005 return 0;
2006 }
2007 p_buf=p_ccw->p_buffer;
2008 p_ctlbk=&ctlbk;
2009 if (p_env->packing == DO_PACKED) { /* packing in progress?*/
2010 memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
2011 } else {
2012 memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
2013 }
2014 switch (p_ctlbk->command)
2015 {
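/*
 * The cases below cover the CLAW handshake as this driver sees it:
 * a system-validate exchange that checks names and frame sizes
 * against the local claw_env, a connection request/response/confirm
 * sequence for link ID 1 (with an extra request when packed operation
 * was asked for), and finally disconnect and error indications.
 */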
2016 case SYSTEM_VALIDATE_REQUEST:
2017 if (p_ctlbk->version != CLAW_VERSION_ID) {
2018 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2019 CLAW_RC_WRONG_VERSION);
2020 dev_warn(tdev, "The communication peer of %s"
2021 " uses an incorrect API version %d\n",
2022 dev->name, p_ctlbk->version);
2024 p_sysval = (struct sysval *)&(p_ctlbk->data);
2025 dev_info(tdev, "%s: Recv Sys Validate Request: "
2026 "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
2027 "Host name=%.8s\n",
2028 dev->name, p_ctlbk->version,
2029 p_ctlbk->linkid,
2030 p_ctlbk->correlator,
2031 p_sysval->WS_name,
2032 p_sysval->host_name);
2033 if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
2034 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2035 CLAW_RC_NAME_MISMATCH);
2036 CLAW_DBF_TEXT(2, setup, "HSTBAD");
2037 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
2038 CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
2039 dev_warn(tdev,
2040 "Host name %s for %s does not match the"
2041 " remote adapter name %s\n",
2042 p_sysval->host_name,
2043 dev->name,
2044 temp_host_name);
2046 if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
2047 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2048 CLAW_RC_NAME_MISMATCH);
2049 CLAW_DBF_TEXT(2, setup, "WSNBAD");
2050 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
2051 CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
2052 dev_warn(tdev, "Adapter name %s for %s does not match"
2053 " the remote host name %s\n",
2054 p_sysval->WS_name,
2055 dev->name,
2056 temp_ws_name);
2058 if ((p_sysval->write_frame_size < p_env->write_size) &&
2059 (p_env->packing == 0)) {
2060 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2061 CLAW_RC_HOST_RCV_TOO_SMALL);
2062 dev_warn(tdev,
2063 "The local write buffer is smaller than the"
2064 " remote read buffer\n");
2065 CLAW_DBF_TEXT(2, setup, "wrtszbad");
2067 if ((p_sysval->read_frame_size < p_env->read_size) &&
2068 (p_env->packing == 0)) {
2069 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2070 CLAW_RC_HOST_RCV_TOO_SMALL);
2071 dev_warn(tdev,
2072 "The local read buffer is smaller than the"
2073 " remote write buffer\n");
2074 CLAW_DBF_TEXT(2, setup, "rdsizbad");
2076 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
2077 dev_info(tdev,
2078 "CLAW device %.8s: System validate"
2079 " completed.\n", temp_ws_name);
2080 dev_info(tdev,
2081 "%s: sys Validate Rsize:%d Wsize:%d\n",
2082 dev->name, p_sysval->read_frame_size,
2083 p_sysval->write_frame_size);
2084 privptr->system_validate_comp = 1;
2085 if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
2086 p_env->packing = PACKING_ASK;
2087 claw_strt_conn_req(dev);
2088 break;
2089 case SYSTEM_VALIDATE_RESPONSE:
2090 p_sysval = (struct sysval *)&(p_ctlbk->data);
2091 dev_info(tdev,
2092 "Settings for %s validated (version=%d, "
2093 "remote device=%d, rc=%d, adapter name=%.8s, "
2094 "host name=%.8s)\n",
2095 dev->name,
2096 p_ctlbk->version,
2097 p_ctlbk->correlator,
2098 p_ctlbk->rc,
2099 p_sysval->WS_name,
2100 p_sysval->host_name);
2101 switch (p_ctlbk->rc) {
2102 case 0:
2103 dev_info(tdev, "%s: CLAW device "
2104 "%.8s: System validate completed.\n",
2105 dev->name, temp_ws_name);
2106 if (privptr->system_validate_comp == 0)
2107 claw_strt_conn_req(dev);
2108 privptr->system_validate_comp = 1;
2109 break;
2110 case CLAW_RC_NAME_MISMATCH:
2111 dev_warn(tdev, "Validating %s failed because of"
2112 " a host or adapter name mismatch\n",
2113 dev->name);
2114 break;
2115 case CLAW_RC_WRONG_VERSION:
2116 dev_warn(tdev, "Validating %s failed because of a"
2117 " version conflict\n",
2118 dev->name);
2119 break;
2120 case CLAW_RC_HOST_RCV_TOO_SMALL:
2121 dev_warn(tdev, "Validating %s failed because of a"
2122 " frame size conflict\n",
2123 dev->name);
2124 break;
2125 default:
2126 dev_warn(tdev, "The communication peer of %s rejected"
2127 " the connection\n",
2128 dev->name);
2129 break;
2130 }
2131 break;
2133 case CONNECTION_REQUEST:
2134 p_connect = (struct conncmd *)&(p_ctlbk->data);
2135 dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2136 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2137 dev->name,
2138 p_ctlbk->version,
2139 p_ctlbk->linkid,
2140 p_ctlbk->correlator,
2141 p_connect->host_name,
2142 p_connect->WS_name);
2143 if (privptr->active_link_ID != 0) {
2144 claw_snd_disc(dev, p_ctlbk);
2145 dev_info(tdev, "%s rejected a connection request"
2146 " because it is already active\n",
2147 dev->name);
2149 if (p_ctlbk->linkid != 1) {
2150 claw_snd_disc(dev, p_ctlbk);
2151 dev_info(tdev, "%s rejected a request to open multiple"
2152 " connections\n",
2153 dev->name);
2155 rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
2156 if (rc != 0) {
2157 claw_snd_disc(dev, p_ctlbk);
2158 dev_info(tdev, "%s rejected a connection request"
2159 " because of a type mismatch\n",
2160 dev->name);
2162 claw_send_control(dev,
2163 CONNECTION_CONFIRM, p_ctlbk->linkid,
2164 p_ctlbk->correlator,
2165 0, p_connect->host_name,
2166 p_connect->WS_name);
2167 if (p_env->packing == PACKING_ASK) {
2168 p_env->packing = PACK_SEND;
2169 claw_snd_conn_req(dev, 0);
2171 dev_info(tdev, "%s: CLAW device %.8s: Connection "
2172 "completed link_id=%d.\n",
2173 dev->name, temp_ws_name,
2174 p_ctlbk->linkid);
2175 privptr->active_link_ID = p_ctlbk->linkid;
2176 p_ch = &privptr->channel[WRITE_CHANNEL];
2177 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2178 break;
2179 case CONNECTION_RESPONSE:
2180 p_connect = (struct conncmd *)&(p_ctlbk->data);
2181 dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2182 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
2183 dev->name,
2184 p_ctlbk->version,
2185 p_ctlbk->linkid,
2186 p_ctlbk->correlator,
2187 p_ctlbk->rc,
2188 p_connect->host_name,
2189 p_connect->WS_name);
2191 if (p_ctlbk->rc != 0) {
2192 dev_warn(tdev, "The communication peer of %s rejected"
2193 " a connection request\n",
2194 dev->name);
2195 return 1;
2197 rc = find_link(dev,
2198 p_connect->host_name, p_connect->WS_name);
2199 if (rc != 0) {
2200 claw_snd_disc(dev, p_ctlbk);
2201 dev_warn(tdev, "The communication peer of %s"
2202 " rejected a connection "
2203 "request because of a type mismatch\n",
2204 dev->name);
2206 /* should be until CONNECTION_CONFIRM */
2207 privptr->active_link_ID = -(p_ctlbk->linkid);
2208 break;
2209 case CONNECTION_CONFIRM:
2210 p_connect = (struct conncmd *)&(p_ctlbk->data);
2211 dev_info(tdev,
2212 "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
2213 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
2214 dev->name,
2215 p_ctlbk->version,
2216 p_ctlbk->linkid,
2217 p_ctlbk->correlator,
2218 p_connect->host_name,
2219 p_connect->WS_name);
2220 if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
2221 privptr->active_link_ID = p_ctlbk->linkid;
2222 if (p_env->packing > PACKING_ASK) {
2223 dev_info(tdev,
2224 "%s: Confirmed Now packing\n", dev->name);
2225 p_env->packing = DO_PACKED;
2227 p_ch = &privptr->channel[WRITE_CHANNEL];
2228 wake_up(&p_ch->wait);
2229 } else {
2230 dev_warn(tdev, "Activating %s failed because of"
2231 " an incorrect link ID=%d\n",
2232 dev->name, p_ctlbk->linkid);
2233 claw_snd_disc(dev, p_ctlbk);
2235 break;
2236 case DISCONNECT:
2237 dev_info(tdev, "%s: Disconnect: "
2238 "Vers=%d,link_id=%d,Corr=%d\n",
2239 dev->name, p_ctlbk->version,
2240 p_ctlbk->linkid, p_ctlbk->correlator);
2241 if ((p_ctlbk->linkid == 2) &&
2242 (p_env->packing == PACK_SEND)) {
2243 privptr->active_link_ID = 1;
2244 p_env->packing = DO_PACKED;
2245 } else
2246 privptr->active_link_ID = 0;
2247 break;
2248 case CLAW_ERROR:
2249 dev_warn(tdev, "The communication peer of %s failed\n",
2250 dev->name);
2251 break;
2252 default:
2253 dev_warn(tdev, "The communication peer of %s sent"
2254 " an unknown command code\n",
2255 dev->name);
2256 break;
2257 }
2259 return 0;
2260 } /* end of claw_process_control */
2263 /*-------------------------------------------------------------------*
2264 * claw_send_control *
2266 *--------------------------------------------------------------------*/
2268 static int
2269 claw_send_control(struct net_device *dev, __u8 type, __u8 link,
2270 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
2271 {
2272 struct claw_privbk *privptr;
2273 struct clawctl *p_ctl;
2274 struct sysval *p_sysval;
2275 struct conncmd *p_connect;
2276 struct sk_buff *skb;
2278 CLAW_DBF_TEXT(2, setup, "sndcntl");
2279 privptr = dev->ml_priv;
2280 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2282 p_ctl->command=type;
2283 p_ctl->version=CLAW_VERSION_ID;
2284 p_ctl->linkid=link;
2285 p_ctl->correlator=correlator;
2286 p_ctl->rc=rc;
2288 p_sysval=(struct sysval *)&p_ctl->data;
2289 p_connect=(struct conncmd *)&p_ctl->data;
2291 switch (p_ctl->command) {
2292 case SYSTEM_VALIDATE_REQUEST:
2293 case SYSTEM_VALIDATE_RESPONSE:
2294 memcpy(&p_sysval->host_name, local_name, 8);
2295 memcpy(&p_sysval->WS_name, remote_name, 8);
2296 if (privptr->p_env->packing > 0) {
2297 p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
2298 p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
2299 } else {
2300 /* how big is the biggest group of packets */
2301 p_sysval->read_frame_size =
2302 privptr->p_env->read_size;
2303 p_sysval->write_frame_size =
2304 privptr->p_env->write_size;
2306 memset(&p_sysval->reserved, 0x00, 4);
2307 break;
2308 case CONNECTION_REQUEST:
2309 case CONNECTION_RESPONSE:
2310 case CONNECTION_CONFIRM:
2311 case DISCONNECT:
2312 memcpy(&p_sysval->host_name, local_name, 8);
2313 memcpy(&p_sysval->WS_name, remote_name, 8);
2314 if (privptr->p_env->packing > 0) {
2315 /* How big is the biggest packet */
2316 p_connect->reserved1[0]=CLAW_FRAME_SIZE;
2317 p_connect->reserved1[1]=CLAW_FRAME_SIZE;
2318 } else {
2319 memset(&p_connect->reserved1, 0x00, 4);
2320 memset(&p_connect->reserved2, 0x00, 4);
2322 break;
2323 default:
2324 break;
2325 }
2327 /* write Control Record to the device */
2330 skb = dev_alloc_skb(sizeof(struct clawctl));
2331 if (!skb) {
2332 return -ENOMEM;
2333 }
2334 memcpy(skb_put(skb, sizeof(struct clawctl)),
2335 p_ctl, sizeof(struct clawctl));
2336 if (privptr->p_env->packing >= PACK_SEND)
2337 claw_hw_tx(skb, dev, 1);
2338 else
2339 claw_hw_tx(skb, dev, 0);
2340 return 0;
2341 } /* end of claw_send_control */
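/*
 * claw_send_control() fills the control record in privptr->ctl_bk
 * (sysval payload for the validate commands, conncmd payload for the
 * connection commands), copies it into a freshly allocated skb and
 * passes it to claw_hw_tx(); once packing has reached PACK_SEND the
 * record is sent with the third claw_hw_tx() argument set to 1
 * instead of 0.
 */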
2343 /*-------------------------------------------------------------------*
2344 * claw_snd_conn_req *
2346 *--------------------------------------------------------------------*/
2347 static int
2348 claw_snd_conn_req(struct net_device *dev, __u8 link)
2349 {
2350 int rc;
2351 struct claw_privbk *privptr = dev->ml_priv;
2352 struct clawctl *p_ctl;
2354 CLAW_DBF_TEXT(2, setup, "snd_conn");
2355 rc = 1;
2356 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2357 p_ctl->linkid = link;
2358 if ( privptr->system_validate_comp==0x00 ) {
2359 return rc;
2361 if (privptr->p_env->packing == PACKING_ASK )
2362 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2363 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
2364 if (privptr->p_env->packing == PACK_SEND) {
2365 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2366 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
2368 if (privptr->p_env->packing == 0)
2369 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2370 HOST_APPL_NAME, privptr->p_env->api_type);
2371 return rc;
2373 } /* end of claw_snd_conn_req */
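/*
 * The application name requested above depends on the packing state:
 * WS_APPL_NAME_PACKED while packing is still being negotiated
 * (PACKING_ASK), WS_APPL_NAME_IP_NAME once PACK_SEND is reached, and
 * HOST_APPL_NAME with the configured api_type when packing is off.
 */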
2376 /*-------------------------------------------------------------------*
2377 * claw_snd_disc *
2379 *--------------------------------------------------------------------*/
2381 static int
2382 claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
2383 {
2384 int rc;
2385 struct conncmd * p_connect;
2387 CLAW_DBF_TEXT(2, setup, "snd_dsc");
2388 p_connect=(struct conncmd *)&p_ctl->data;
2390 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
2391 p_ctl->correlator, 0,
2392 p_connect->host_name, p_connect->WS_name);
2393 return rc;
2394 } /* end of claw_snd_disc */
2397 /*-------------------------------------------------------------------*
2398 * claw_snd_sys_validate_rsp *
2400 *--------------------------------------------------------------------*/
2402 static int
2403 claw_snd_sys_validate_rsp(struct net_device *dev,
2404 struct clawctl *p_ctl, __u32 return_code)
2405 {
2406 struct claw_env * p_env;
2407 struct claw_privbk *privptr;
2408 int rc;
2410 CLAW_DBF_TEXT(2, setup, "chkresp");
2411 privptr = dev->ml_priv;
2412 p_env=privptr->p_env;
2413 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
2414 p_ctl->linkid,
2415 p_ctl->correlator,
2416 return_code,
2417 p_env->host_name,
2418 p_env->adapter_name );
2419 return rc;
2420 } /* end of claw_snd_sys_validate_rsp */
2422 /*-------------------------------------------------------------------*
2423 * claw_strt_conn_req *
2425 *--------------------------------------------------------------------*/
2427 static int
2428 claw_strt_conn_req(struct net_device *dev )
2429 {
2430 int rc;
2432 CLAW_DBF_TEXT(2, setup, "conn_req");
2433 rc=claw_snd_conn_req(dev, 1);
2434 return rc;
2435 } /* end of claw_strt_conn_req */
2439 /*-------------------------------------------------------------------*
2440 * claw_stats *
2441 *-------------------------------------------------------------------*/
2443 static struct
2444 net_device_stats *claw_stats(struct net_device *dev)
2445 {
2446 struct claw_privbk *privptr;
2448 CLAW_DBF_TEXT(4, trace, "stats");
2449 privptr = dev->ml_priv;
2450 return &privptr->stats;
2451 } /* end of claw_stats */
2454 /*-------------------------------------------------------------------*
2455 * unpack_read *
2457 *--------------------------------------------------------------------*/
2458 static void
2459 unpack_read(struct net_device *dev )
2460 {
2461 struct sk_buff *skb;
2462 struct claw_privbk *privptr;
2463 struct claw_env *p_env;
2464 struct ccwbk *p_this_ccw;
2465 struct ccwbk *p_first_ccw;
2466 struct ccwbk *p_last_ccw;
2467 struct clawph *p_packh;
2468 void *p_packd;
2469 struct clawctl *p_ctlrec=NULL;
2470 struct device *p_dev;
2472 __u32 len_of_data;
2473 __u32 pack_off;
2474 __u8 link_num;
2475 __u8 mtc_this_frm=0;
2476 __u32 bytes_to_mov;
2477 int i=0;
2478 int p=0;
2480 CLAW_DBF_TEXT(4, trace, "unpkread");
2481 p_first_ccw=NULL;
2482 p_last_ccw=NULL;
2483 p_packh=NULL;
2484 p_packd=NULL;
2485 privptr = dev->ml_priv;
2487 p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
2488 p_env = privptr->p_env;
2489 p_this_ccw=privptr->p_read_active_first;
2490 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
2491 pack_off = 0;
2492 p = 0;
2493 p_this_ccw->header.flag=CLAW_PENDING;
2494 privptr->p_read_active_first=p_this_ccw->next;
2495 p_this_ccw->next=NULL;
2496 p_packh = (struct clawph *)p_this_ccw->p_buffer;
2497 if ((p_env->packing == PACK_SEND) &&
2498 (p_packh->len == 32) &&
2499 (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
2500 p_packh++; /* peek past pack header */
2501 p_ctlrec = (struct clawctl *)p_packh;
2502 p_packh--; /* un peek */
2503 if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
2504 (p_ctlrec->command == CONNECTION_CONFIRM))
2505 p_env->packing = DO_PACKED;
2507 if (p_env->packing == DO_PACKED)
2508 link_num=p_packh->link_num;
2509 else
2510 link_num=p_this_ccw->header.opcode / 8;
2511 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
2512 mtc_this_frm=1;
2513 if (p_this_ccw->header.length!=
2514 privptr->p_env->read_size ) {
2515 dev_warn(p_dev,
2516 "The communication peer of %s"
2517 " sent a faulty"
2518 " frame of length %02x\n",
2519 dev->name, p_this_ccw->header.length);
2520 }
2521 }
2523 if (privptr->mtc_skipping) {
2524 /*
2525 * We're in the mode of skipping past a
2526 * multi-frame message
2527 * that we can't process for some reason or other.
2528 * The first frame without the More-To-Come flag is
2529 * the last frame of the skipped message.
2530 */
2531 /* in case of More-To-Come not set in this frame */
2532 if (mtc_this_frm==0) {
2533 privptr->mtc_skipping=0; /* Ok, the end */
2534 privptr->mtc_logical_link=-1;
2535 }
2536 goto NextFrame;
2537 }
2539 if (link_num==0) {
2540 claw_process_control(dev, p_this_ccw);
2541 CLAW_DBF_TEXT(4, trace, "UnpkCntl");
2542 goto NextFrame;
2544 unpack_next:
2545 if (p_env->packing == DO_PACKED) {
2546 if (pack_off > p_env->read_size)
2547 goto NextFrame;
2548 p_packd = p_this_ccw->p_buffer+pack_off;
2549 p_packh = (struct clawph *) p_packd;
2550 if ((p_packh->len == 0) || /* done with this frame? */
2551 (p_packh->flag != 0))
2552 goto NextFrame;
2553 bytes_to_mov = p_packh->len;
2554 pack_off += bytes_to_mov+sizeof(struct clawph);
2555 p++;
2556 } else {
2557 bytes_to_mov=p_this_ccw->header.length;
2558 }
2559 if (privptr->mtc_logical_link<0) {
2561 /*
2562 * if More-To-Come is set in this frame then we don't know
2563 * length of entire message, and hence have to allocate
2564 * large buffer */
2566 /* We are starting a new envelope */
2567 privptr->mtc_offset=0;
2568 privptr->mtc_logical_link=link_num;
2571 if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
2572 /* error */
2573 privptr->stats.rx_frame_errors++;
2574 goto NextFrame;
2576 if (p_env->packing == DO_PACKED) {
2577 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2578 p_packd+sizeof(struct clawph), bytes_to_mov);
2580 } else {
2581 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2582 p_this_ccw->p_buffer, bytes_to_mov);
2584 if (mtc_this_frm==0) {
2585 len_of_data=privptr->mtc_offset+bytes_to_mov;
2586 skb=dev_alloc_skb(len_of_data);
2587 if (skb) {
2588 memcpy(skb_put(skb,len_of_data),
2589 privptr->p_mtc_envelope,
2590 len_of_data);
2591 skb->dev=dev;
2592 skb_reset_mac_header(skb);
2593 skb->protocol=htons(ETH_P_IP);
2594 skb->ip_summed=CHECKSUM_UNNECESSARY;
2595 privptr->stats.rx_packets++;
2596 privptr->stats.rx_bytes+=len_of_data;
2597 netif_rx(skb);
2599 else {
2600 dev_info(p_dev, "Allocating a buffer for"
2601 " incoming data failed\n");
2602 privptr->stats.rx_dropped++;
2603 }
2604 privptr->mtc_offset=0;
2605 privptr->mtc_logical_link=-1;
2606 }
2607 else {
2608 privptr->mtc_offset+=bytes_to_mov;
2609 }
2610 if (p_env->packing == DO_PACKED)
2611 goto unpack_next;
2612 NextFrame:
2613 /*
2614 * Remove ThisCCWblock from active read queue, and add it
2615 * to queue of free blocks to be reused.
2616 */
2617 i++;
2618 p_this_ccw->header.length=0xffff;
2619 p_this_ccw->header.opcode=0xff;
2620 /*
2621 * add this one to the free queue for later reuse
2622 */
2623 if (p_first_ccw==NULL) {
2624 p_first_ccw = p_this_ccw;
2625 }
2626 else {
2627 p_last_ccw->next = p_this_ccw;
2628 }
2629 p_last_ccw = p_this_ccw;
2630 /*
2631 * chain to next block on active read queue
2632 */
2633 p_this_ccw = privptr->p_read_active_first;
2634 CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
2635 } /* end of while */
2637 /* check validity */
2639 CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
2640 add_claw_reads(dev, p_first_ccw, p_last_ccw);
2641 claw_strt_read(dev, LOCK_YES);
2642 return;
2643 } /* end of unpack_read */
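/*
 * unpack_read() in short: completed read CCW blocks are walked in
 * order; frames on link 0 go to claw_process_control(), packed frames
 * are split at their clawph headers, and frames flagged MORE_to_COME
 * are collected in p_mtc_envelope until the final frame arrives, at
 * which point an skb is built and handed to netif_rx(). Finished
 * blocks are returned to the free chain via add_claw_reads() and the
 * read channel is restarted with claw_strt_read().
 */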
2645 /*-------------------------------------------------------------------*
2646 * claw_strt_read *
2648 *--------------------------------------------------------------------*/
2649 static void
2650 claw_strt_read (struct net_device *dev, int lock )
2651 {
2652 int rc = 0;
2653 __u32 parm;
2654 unsigned long saveflags = 0;
2655 struct claw_privbk *privptr = dev->ml_priv;
2656 struct ccwbk*p_ccwbk;
2657 struct chbk *p_ch;
2658 struct clawh *p_clawh;
2659 p_ch = &privptr->channel[READ_CHANNEL];
2661 CLAW_DBF_TEXT(4, trace, "StRdNter");
2662 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
2663 p_clawh->flag=CLAW_IDLE; /* 0x00 */
2665 if ((privptr->p_write_active_first!=NULL &&
2666 privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
2667 (privptr->p_read_active_first!=NULL &&
2668 privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
2669 p_clawh->flag=CLAW_BUSY; /* 0xff */
2670 }
2671 if (lock==LOCK_YES) {
2672 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
2673 }
2674 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2675 CLAW_DBF_TEXT(4, trace, "HotRead");
2676 p_ccwbk=privptr->p_read_active_first;
2677 parm = (unsigned long) p_ch;
2678 rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
2679 0xff, 0);
2680 if (rc != 0) {
2681 ccw_check_return_code(p_ch->cdev, rc);
2682 }
2683 }
2684 else {
2685 CLAW_DBF_TEXT(2, trace, "ReadAct");
2686 }
2688 if (lock==LOCK_YES) {
2689 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
2690 }
2691 CLAW_DBF_TEXT(4, trace, "StRdExit");
2692 return;
2693 } /* end of claw_strt_read */
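/*
 * claw_strt_read() marks the shared signal block CLAW_IDLE or
 * CLAW_BUSY depending on whether read or write CCWs are still
 * outstanding, and only issues ccw_device_start() on the read channel
 * if the IO_active bit was not already set; the lock argument decides
 * whether the ccw device lock is taken here or is already held by the
 * caller.
 */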
2695 /*-------------------------------------------------------------------*
2696 * claw_strt_out_IO *
2698 *--------------------------------------------------------------------*/
2700 static void
2701 claw_strt_out_IO( struct net_device *dev )
2702 {
2703 int rc = 0;
2704 unsigned long parm;
2705 struct claw_privbk *privptr;
2706 struct chbk *p_ch;
2707 struct ccwbk *p_first_ccw;
2709 if (!dev) {
2710 return;
2712 privptr = (struct claw_privbk *)dev->ml_priv;
2713 p_ch = &privptr->channel[WRITE_CHANNEL];
2715 CLAW_DBF_TEXT(4, trace, "strt_io");
2716 p_first_ccw=privptr->p_write_active_first;
2718 if (p_ch->claw_state == CLAW_STOP)
2719 return;
2720 if (p_first_ccw == NULL) {
2721 return;
2723 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2724 parm = (unsigned long) p_ch;
2725 CLAW_DBF_TEXT(2, trace, "StWrtIO");
2726 rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
2727 0xff, 0);
2728 if (rc != 0) {
2729 ccw_check_return_code(p_ch->cdev, rc);
2730 }
2731 }
2732 dev->trans_start = jiffies;
2733 return;
2734 } /* end of claw_strt_out_IO */
2736 /*-------------------------------------------------------------------*
2737 * Free write buffers *
2739 *--------------------------------------------------------------------*/
2741 static void
2742 claw_free_wrt_buf( struct net_device *dev )
2743 {
2745 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2746 struct ccwbk*p_this_ccw;
2747 struct ccwbk*p_next_ccw;
2749 CLAW_DBF_TEXT(4, trace, "freewrtb");
2750 /* scan the write queue to free any completed write packets */
2751 p_this_ccw=privptr->p_write_active_first;
2752 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2753 {
2754 p_next_ccw = p_this_ccw->next;
2755 if (((p_next_ccw!=NULL) &&
2756 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
2757 ((p_this_ccw == privptr->p_write_active_last) &&
2758 (p_this_ccw->header.flag!=CLAW_PENDING))) {
2759 /* The next CCW is OK or this is */
2760 /* the last CCW...free it @A1A */
2761 privptr->p_write_active_first=p_this_ccw->next;
2762 p_this_ccw->header.flag=CLAW_PENDING;
2763 p_this_ccw->next=privptr->p_write_free_chain;
2764 privptr->p_write_free_chain=p_this_ccw;
2765 ++privptr->write_free_count;
2766 privptr->stats.tx_bytes+= p_this_ccw->write.count;
2767 p_this_ccw=privptr->p_write_active_first;
2768 privptr->stats.tx_packets++;
2769 }
2770 else {
2771 break;
2772 }
2773 }
2774 if (privptr->write_free_count!=0) {
2775 claw_clearbit_busy(TB_NOBUFFER,dev);
2777 /* whole chain removed? */
2778 if (privptr->p_write_active_first==NULL) {
2779 privptr->p_write_active_last=NULL;
2780 }
2781 CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
2782 return;
2783 }
2785 /*-------------------------------------------------------------------*
2786 * claw free netdevice *
2788 *--------------------------------------------------------------------*/
2789 static void
2790 claw_free_netdevice(struct net_device * dev, int free_dev)
2791 {
2792 struct claw_privbk *privptr;
2794 CLAW_DBF_TEXT(2, setup, "free_dev");
2795 if (!dev)
2796 return;
2797 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2798 privptr = dev->ml_priv;
2799 if (dev->flags & IFF_RUNNING)
2800 claw_release(dev);
2801 if (privptr) {
2802 privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
2804 dev->ml_priv = NULL;
2805 #ifdef MODULE
2806 if (free_dev) {
2807 free_netdev(dev);
2808 }
2809 #endif
2810 CLAW_DBF_TEXT(2, setup, "free_ok");
2811 }
2813 /*
2814 * Claw init netdevice
2815 * Initialize everything of the net device except the name and the
2816 * channel structs.
2817 */
2818 static const struct net_device_ops claw_netdev_ops = {
2819 .ndo_open = claw_open,
2820 .ndo_stop = claw_release,
2821 .ndo_get_stats = claw_stats,
2822 .ndo_start_xmit = claw_tx,
2823 .ndo_change_mtu = claw_change_mtu,
2824 };
2826 static void
2827 claw_init_netdevice(struct net_device * dev)
2828 {
2829 CLAW_DBF_TEXT(2, setup, "init_dev");
2830 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2831 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
2832 dev->hard_header_len = 0;
2833 dev->addr_len = 0;
2834 dev->type = ARPHRD_SLIP;
2835 dev->tx_queue_len = 1300;
2836 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2837 dev->netdev_ops = &claw_netdev_ops;
2838 CLAW_DBF_TEXT(2, setup, "initok");
2839 return;
2840 }
2842 /*
2843 * Init a new channel in the privptr->channel[i].
2845 * @param cdev The ccw_device to be added.
2847 * @return 0 on success, !0 on error.
2848 */
2849 static int
2850 add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
2851 {
2852 struct chbk *p_ch;
2853 struct ccw_dev_id dev_id;
2855 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
2856 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
2857 p_ch = &privptr->channel[i];
2858 p_ch->cdev = cdev;
2859 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
2860 ccw_device_get_id(cdev, &dev_id);
2861 p_ch->devno = dev_id.devno;
2862 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
2863 return -ENOMEM;
2864 }
2865 return 0;
2866 }
2869 /*
2871 * Setup an interface.
2873 * @param cgdev Device to be setup.
2875 * @returns 0 on success, !0 on failure.
2876 */
2877 static int
2878 claw_new_device(struct ccwgroup_device *cgdev)
2879 {
2880 struct claw_privbk *privptr;
2881 struct claw_env *p_env;
2882 struct net_device *dev;
2883 int ret;
2884 struct ccw_dev_id dev_id;
2886 dev_info(&cgdev->dev, "add for %s\n",
2887 dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
2888 CLAW_DBF_TEXT(2, setup, "new_dev");
2889 privptr = dev_get_drvdata(&cgdev->dev);
2890 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2891 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2892 if (!privptr)
2893 return -ENODEV;
2894 p_env = privptr->p_env;
2895 ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
2896 p_env->devno[READ_CHANNEL] = dev_id.devno;
2897 ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
2898 p_env->devno[WRITE_CHANNEL] = dev_id.devno;
2899 ret = add_channel(cgdev->cdev[0],0,privptr);
2900 if (ret == 0)
2901 ret = add_channel(cgdev->cdev[1],1,privptr);
2902 if (ret != 0) {
2903 dev_warn(&cgdev->dev, "Creating a CLAW group device"
2904 " failed with error code %d\n", ret);
2905 goto out;
2907 ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
2908 if (ret != 0) {
2909 dev_warn(&cgdev->dev,
2910 "Setting the read subchannel online"
2911 " failed with error code %d\n", ret);
2912 goto out;
2914 ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
2915 if (ret != 0) {
2916 dev_warn(&cgdev->dev,
2917 "Setting the write subchannel online "
2918 "failed with error code %d\n", ret);
2919 goto out;
2921 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
2922 if (!dev) {
2923 dev_warn(&cgdev->dev,
2924 "Activating the CLAW device failed\n");
2925 goto out;
2927 dev->ml_priv = privptr;
2928 dev_set_drvdata(&cgdev->dev, privptr);
2929 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2930 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2931 /* sysfs magic */
2932 SET_NETDEV_DEV(dev, &cgdev->dev);
2933 if (register_netdev(dev) != 0) {
2934 claw_free_netdevice(dev, 1);
2935 CLAW_DBF_TEXT(2, trace, "regfail");
2936 goto out;
2938 dev->flags &=~IFF_RUNNING;
2939 if (privptr->buffs_alloc == 0) {
2940 ret=init_ccw_bk(dev);
2941 if (ret !=0) {
2942 unregister_netdev(dev);
2943 claw_free_netdevice(dev,1);
2944 CLAW_DBF_TEXT(2, trace, "ccwmem");
2945 goto out;
2946 }
2947 }
2948 privptr->channel[READ_CHANNEL].ndev = dev;
2949 privptr->channel[WRITE_CHANNEL].ndev = dev;
2950 privptr->p_env->ndev = dev;
2952 dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
2953 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
2954 dev->name, p_env->read_size,
2955 p_env->write_size, p_env->read_buffers,
2956 p_env->write_buffers, p_env->devno[READ_CHANNEL],
2957 p_env->devno[WRITE_CHANNEL]);
2958 dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
2959 ":%.8s api_type: %.8s\n",
2960 dev->name, p_env->host_name,
2961 p_env->adapter_name , p_env->api_type);
2962 return 0;
2963 out:
2964 ccw_device_set_offline(cgdev->cdev[1]);
2965 ccw_device_set_offline(cgdev->cdev[0]);
2966 return -ENODEV;
2967 }
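/*
 * Bring-up order in claw_new_device(): both channels are added and
 * set online before the net_device is allocated and registered, the
 * CCW buffer chains are only built (init_ccw_bk) if they do not exist
 * yet, and every failure falls through to "out", where both
 * subchannels are set offline again.
 */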
2969 static void
2970 claw_purge_skb_queue(struct sk_buff_head *q)
2971 {
2972 struct sk_buff *skb;
2974 CLAW_DBF_TEXT(4, trace, "purgque");
2975 while ((skb = skb_dequeue(q))) {
2976 atomic_dec(&skb->users);
2977 dev_kfree_skb_any(skb);
2978 }
2979 }
2981 /*
2982 * Shutdown an interface.
2984 * @param cgdev Device to be shut down.
2986 * @returns 0 on success, !0 on failure.
2987 */
2988 static int
2989 claw_shutdown_device(struct ccwgroup_device *cgdev)
2990 {
2991 struct claw_privbk *priv;
2992 struct net_device *ndev;
2993 int ret = 0;
2995 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
2996 priv = dev_get_drvdata(&cgdev->dev);
2997 if (!priv)
2998 return -ENODEV;
2999 ndev = priv->channel[READ_CHANNEL].ndev;
3000 if (ndev) {
3001 /* Close the device */
3002 dev_info(&cgdev->dev, "%s: shutting down\n",
3003 ndev->name);
3004 if (ndev->flags & IFF_RUNNING)
3005 ret = claw_release(ndev);
3006 ndev->flags &=~IFF_RUNNING;
3007 unregister_netdev(ndev);
3008 ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
3009 claw_free_netdevice(ndev, 1);
3010 priv->channel[READ_CHANNEL].ndev = NULL;
3011 priv->channel[WRITE_CHANNEL].ndev = NULL;
3012 priv->p_env->ndev = NULL;
3014 ccw_device_set_offline(cgdev->cdev[1]);
3015 ccw_device_set_offline(cgdev->cdev[0]);
3016 return ret;
3017 }
3019 static void
3020 claw_remove_device(struct ccwgroup_device *cgdev)
3021 {
3022 struct claw_privbk *priv;
3024 BUG_ON(!cgdev);
3025 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3026 priv = dev_get_drvdata(&cgdev->dev);
3027 BUG_ON(!priv);
3028 dev_info(&cgdev->dev, " will be removed.\n");
3029 if (cgdev->state == CCWGROUP_ONLINE)
3030 claw_shutdown_device(cgdev);
3031 kfree(priv->p_mtc_envelope);
3032 priv->p_mtc_envelope=NULL;
3033 kfree(priv->p_env);
3034 priv->p_env=NULL;
3035 kfree(priv->channel[0].irb);
3036 priv->channel[0].irb=NULL;
3037 kfree(priv->channel[1].irb);
3038 priv->channel[1].irb=NULL;
3039 kfree(priv);
3040 dev_set_drvdata(&cgdev->dev, NULL);
3041 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
3042 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
3043 put_device(&cgdev->dev);
3045 return;
3046 }
3049 /*
3050 * sysfs attributes
3051 */
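/*
 * The attributes below (host_name, adapter_name, api_type, read_buffer
 * and write_buffer) are plain text/number values on the ccwgroup
 * device. Purely as an illustration (the bus ID is made up and the
 * real path depends on the grouped devices), a device might be
 * configured from user space along these lines:
 *
 *   echo MYHOST > /sys/bus/ccwgroup/devices/0.0.f000/host_name
 *   echo MYGATE > /sys/bus/ccwgroup/devices/0.0.f000/adapter_name
 *   echo 16     > /sys/bus/ccwgroup/devices/0.0.f000/read_buffer
 *
 * Names longer than MAX_NAME_LEN and buffer counts outside 2..512
 * (2..64 when packing is active) are rejected with -EINVAL.
 */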
3052 static ssize_t
3053 claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3055 struct claw_privbk *priv;
3056 struct claw_env * p_env;
3058 priv = dev_get_drvdata(dev);
3059 if (!priv)
3060 return -ENODEV;
3061 p_env = priv->p_env;
3062 return sprintf(buf, "%s\n",p_env->host_name);
3065 static ssize_t
3066 claw_hname_write(struct device *dev, struct device_attribute *attr,
3067 const char *buf, size_t count)
3069 struct claw_privbk *priv;
3070 struct claw_env * p_env;
3072 priv = dev_get_drvdata(dev);
3073 if (!priv)
3074 return -ENODEV;
3075 p_env = priv->p_env;
3076 if (count > MAX_NAME_LEN+1)
3077 return -EINVAL;
3078 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
3079 strncpy(p_env->host_name,buf, count);
3080 p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
3081 p_env->host_name[MAX_NAME_LEN] = 0x00;
3082 CLAW_DBF_TEXT(2, setup, "HstnSet");
3083 CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
3085 return count;
3088 static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
3090 static ssize_t
3091 claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3093 struct claw_privbk *priv;
3094 struct claw_env * p_env;
3096 priv = dev_get_drvdata(dev);
3097 if (!priv)
3098 return -ENODEV;
3099 p_env = priv->p_env;
3100 return sprintf(buf, "%s\n", p_env->adapter_name);
3103 static ssize_t
3104 claw_adname_write(struct device *dev, struct device_attribute *attr,
3105 const char *buf, size_t count)
3107 struct claw_privbk *priv;
3108 struct claw_env * p_env;
3110 priv = dev_get_drvdata(dev);
3111 if (!priv)
3112 return -ENODEV;
3113 p_env = priv->p_env;
3114 if (count > MAX_NAME_LEN+1)
3115 return -EINVAL;
3116 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
3117 strncpy(p_env->adapter_name,buf, count);
3118 p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
3119 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
3120 CLAW_DBF_TEXT(2, setup, "AdnSet");
3121 CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
3123 return count;
3126 static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
3128 static ssize_t
3129 claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3131 struct claw_privbk *priv;
3132 struct claw_env * p_env;
3134 priv = dev_get_drvdata(dev);
3135 if (!priv)
3136 return -ENODEV;
3137 p_env = priv->p_env;
3138 return sprintf(buf, "%s\n",
3139 p_env->api_type);
3142 static ssize_t
3143 claw_apname_write(struct device *dev, struct device_attribute *attr,
3144 const char *buf, size_t count)
3146 struct claw_privbk *priv;
3147 struct claw_env * p_env;
3149 priv = dev_get_drvdata(dev);
3150 if (!priv)
3151 return -ENODEV;
3152 p_env = priv->p_env;
3153 if (count > MAX_NAME_LEN+1)
3154 return -EINVAL;
3155 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
3156 strncpy(p_env->api_type,buf, count);
3157 p_env->api_type[count-1] = 0x20; /* we get a loose 0x0a */
3158 p_env->api_type[MAX_NAME_LEN] = 0x00;
3159 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
3160 p_env->read_size=DEF_PACK_BUFSIZE;
3161 p_env->write_size=DEF_PACK_BUFSIZE;
3162 p_env->packing=PACKING_ASK;
3163 CLAW_DBF_TEXT(2, setup, "PACKING");
3165 else {
3166 p_env->packing=0;
3167 p_env->read_size=CLAW_FRAME_SIZE;
3168 p_env->write_size=CLAW_FRAME_SIZE;
3169 CLAW_DBF_TEXT(2, setup, "ApiSet");
3171 CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
3172 return count;
3175 static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
3177 static ssize_t
3178 claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3180 struct claw_privbk *priv;
3181 struct claw_env * p_env;
3183 priv = dev_get_drvdata(dev);
3184 if (!priv)
3185 return -ENODEV;
3186 p_env = priv->p_env;
3187 return sprintf(buf, "%d\n", p_env->write_buffers);
3190 static ssize_t
3191 claw_wbuff_write(struct device *dev, struct device_attribute *attr,
3192 const char *buf, size_t count)
3194 struct claw_privbk *priv;
3195 struct claw_env * p_env;
3196 int nnn,max;
3198 priv = dev_get_drvdata(dev);
3199 if (!priv)
3200 return -ENODEV;
3201 p_env = priv->p_env;
3202 sscanf(buf, "%i", &nnn);
3203 if (p_env->packing) {
3204 max = 64;
3206 else {
3207 max = 512;
3209 if ((nnn > max ) || (nnn < 2))
3210 return -EINVAL;
3211 p_env->write_buffers = nnn;
3212 CLAW_DBF_TEXT(2, setup, "Wbufset");
3213 CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
3214 return count;
3217 static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
3219 static ssize_t
3220 claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3222 struct claw_privbk *priv;
3223 struct claw_env * p_env;
3225 priv = dev_get_drvdata(dev);
3226 if (!priv)
3227 return -ENODEV;
3228 p_env = priv->p_env;
3229 return sprintf(buf, "%d\n", p_env->read_buffers);
3232 static ssize_t
3233 claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3234 const char *buf, size_t count)
3236 struct claw_privbk *priv;
3237 struct claw_env *p_env;
3238 int nnn,max;
3240 priv = dev_get_drvdata(dev);
3241 if (!priv)
3242 return -ENODEV;
3243 p_env = priv->p_env;
3244 sscanf(buf, "%i", &nnn);
3245 if (p_env->packing) {
3246 max = 64;
3248 else {
3249 max = 512;
3251 if ((nnn > max ) || (nnn < 2))
3252 return -EINVAL;
3253 p_env->read_buffers = nnn;
3254 CLAW_DBF_TEXT(2, setup, "Rbufset");
3255 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3256 return count;
3258 static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
3260 static struct attribute *claw_attr[] = {
3261 &dev_attr_read_buffer.attr,
3262 &dev_attr_write_buffer.attr,
3263 &dev_attr_adapter_name.attr,
3264 &dev_attr_api_type.attr,
3265 &dev_attr_host_name.attr,
3266 NULL,
3267 };
3268 static struct attribute_group claw_attr_group = {
3269 .attrs = claw_attr,
3270 };
3271 static const struct attribute_group *claw_attr_groups[] = {
3272 &claw_attr_group,
3273 NULL,
3274 };
3275 static const struct device_type claw_devtype = {
3276 .name = "claw",
3277 .groups = claw_attr_groups,
3278 };
3280 /*----------------------------------------------------------------*
3281 * claw_probe *
3282 * this function is called for each CLAW device. *
3283 *----------------------------------------------------------------*/
3284 static int claw_probe(struct ccwgroup_device *cgdev)
3285 {
3286 struct claw_privbk *privptr = NULL;
3288 CLAW_DBF_TEXT(2, setup, "probe");
3289 if (!get_device(&cgdev->dev))
3290 return -ENODEV;
3291 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
3292 dev_set_drvdata(&cgdev->dev, privptr);
3293 if (privptr == NULL) {
3294 probe_error(cgdev);
3295 put_device(&cgdev->dev);
3296 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3297 return -ENOMEM;
3299 privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
3300 privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
3301 if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
3302 probe_error(cgdev);
3303 put_device(&cgdev->dev);
3304 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3305 return -ENOMEM;
3307 memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
3308 memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
3309 memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
3310 privptr->p_env->packing = 0;
3311 privptr->p_env->write_buffers = 5;
3312 privptr->p_env->read_buffers = 5;
3313 privptr->p_env->read_size = CLAW_FRAME_SIZE;
3314 privptr->p_env->write_size = CLAW_FRAME_SIZE;
3315 privptr->p_env->p_priv = privptr;
3316 cgdev->cdev[0]->handler = claw_irq_handler;
3317 cgdev->cdev[1]->handler = claw_irq_handler;
3318 cgdev->dev.type = &claw_devtype;
3319 CLAW_DBF_TEXT(2, setup, "prbext 0");
3321 return 0;
3322 } /* end of claw_probe */
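/*
 * claw_probe() only allocates the private data and fills in defaults
 * (names set to WS_NAME_NOT_DEF, unpacked mode, 5 read and 5 write
 * buffers of CLAW_FRAME_SIZE); the channels themselves are set up
 * later in claw_new_device() when the group device goes online.
 */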
3324 /*--------------------------------------------------------------------*
3325 * claw_init and cleanup *
3326 *---------------------------------------------------------------------*/
3328 static void __exit
3329 claw_cleanup(void)
3330 {
3331 driver_remove_file(&claw_group_driver.driver,
3332 &driver_attr_group);
3333 ccwgroup_driver_unregister(&claw_group_driver);
3334 ccw_driver_unregister(&claw_ccw_driver);
3335 root_device_unregister(claw_root_dev);
3336 claw_unregister_debug_facility();
3337 pr_info("Driver unloaded\n");
3338 }
3341 /*
3342 * Initialize module.
3343 * This is called just after the module is loaded.
3345 * @return 0 on success, !0 on error.
3346 */
3347 static int __init
3348 claw_init(void)
3349 {
3350 int ret = 0;
3352 pr_info("Loading %s\n", version);
3353 ret = claw_register_debug_facility();
3354 if (ret) {
3355 pr_err("Registering with the S/390 debug feature"
3356 " failed with error code %d\n", ret);
3357 goto out_err;
3359 CLAW_DBF_TEXT(2, setup, "init_mod");
3360 claw_root_dev = root_device_register("claw");
3361 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
3362 if (ret)
3363 goto register_err;
3364 ret = ccw_driver_register(&claw_ccw_driver);
3365 if (ret)
3366 goto ccw_err;
3367 claw_group_driver.driver.groups = claw_group_attr_groups;
3368 ret = ccwgroup_driver_register(&claw_group_driver);
3369 if (ret)
3370 goto ccwgroup_err;
3371 return 0;
3373 ccwgroup_err:
3374 ccw_driver_unregister(&claw_ccw_driver);
3375 ccw_err:
3376 root_device_unregister(claw_root_dev);
3377 register_err:
3378 CLAW_DBF_TEXT(2, setup, "init_bad");
3379 claw_unregister_debug_facility();
3380 out_err:
3381 pr_err("Initializing the claw device driver failed\n");
3382 return ret;
3383 }
3385 module_init(claw_init);
3386 module_exit(claw_cleanup);
3388 MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
3389 MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
3390 "Copyright 2000,2008 IBM Corporation\n");
3391 MODULE_LICENSE("GPL");