/*
 *	drivers/i2o/i2o_lan.c
 *
 *	I2O LAN CLASS OSM		May 26th 2000
 *
 *	(C) Copyright 1999, 2000	University of Helsinki,
 *					Department of Computer Science
 *
 *	This code is still under development / test.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
 *	Fixes:		Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
 *			Taneli Vähäkangas <Taneli.Vahakangas@cs.Helsinki.FI>
 *			Deepak Saxena <deepak@plexity.net>
 *
 *	Tested:		in FDDI environment (using SysKonnect's DDM)
 *			in Gigabit Eth environment (using SysKonnect's DDM)
 *			in Fast Ethernet environment (using Intel 82558 DDM)
 *
 *	TODO:		tests for other LAN classes (Token Ring, Fibre Channel)
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/trdevice.h>
#include <linux/fcdevice.h>

#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/malloc.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/tqueue.h>
#include <asm/io.h>

#include <linux/errno.h>

#include <linux/i2o.h>
#include "i2o_lan.h"
//#define DRIVERDEBUG
#ifdef DRIVERDEBUG
#define dprintk(s, args...) printk(s, ## args)
#else
#define dprintk(s, args...)
#endif
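
/* Usage note (illustrative call, not from the original source): something like
 *
 *	dprintk(KERN_INFO "%s: %d buckets posted.\n", dev->name, count);
 *
 * expands to printk() only while DRIVERDEBUG is defined above; otherwise the
 * dprintk() calls in this file compile away to nothing.
 */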
/* The following module parameters are used as default values
 * for per interface values located in the net_device private area.
 * Private values are changed via /proc filesystem.
 */
static u32 max_buckets_out = I2O_LAN_MAX_BUCKETS_OUT;
static u32 bucket_thresh   = I2O_LAN_BUCKET_THRESH;
static u32 rx_copybreak    = I2O_LAN_RX_COPYBREAK;
static u8  tx_batch_mode   = I2O_LAN_TX_BATCH_MODE;
static u32 i2o_event_mask  = I2O_LAN_EVENT_MASK;
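
/* Example (illustrative values): these defaults can be overridden when the
 * module is loaded, e.g.
 *
 *	insmod i2o_lan max_buckets_out=96 bucket_thresh=16 tx_batch_mode=2
 */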
#define MAX_LAN_CARDS  16
static struct net_device *i2o_landevs[MAX_LAN_CARDS+1];
static int unit = -1;			/* device unit number */

static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
static void i2o_lan_send_post_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
static int i2o_lan_receive_post(struct net_device *dev);
static void i2o_lan_receive_post_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
static void i2o_lan_release_buckets(struct net_device *dev, u32 *msg);

static int i2o_lan_reset(struct net_device *dev);
static void i2o_lan_handle_event(struct net_device *dev, u32 *msg);
/* Structures to register handlers for the incoming replies. */

static struct i2o_handler i2o_lan_send_handler = {
	i2o_lan_send_post_reply,	// For send replies
	NULL,
	NULL,
	NULL,
	"I2O LAN OSM send",
	-1,
	I2O_CLASS_LAN
};
static int lan_send_context;

static struct i2o_handler i2o_lan_receive_handler = {
	i2o_lan_receive_post_reply,	// For receive replies
	NULL,
	NULL,
	NULL,
	"I2O LAN OSM receive",
	-1,
	I2O_CLASS_LAN
};
static int lan_receive_context;

static struct i2o_handler i2o_lan_handler = {
	i2o_lan_reply,			// For other replies
	NULL,
	NULL,
	NULL,
	"I2O LAN OSM",
	-1,
	I2O_CLASS_LAN
};
static int lan_context;

DECLARE_TASK_QUEUE(i2o_post_buckets_task);
struct tq_struct run_i2o_post_buckets_task = {
	routine: (void (*)(void *)) run_task_queue,
	data: (void *) 0
};
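
/* i2o_lan_receive_post_reply() re-arms this task with the net_device as its
 * data and queues it on tq_immediate (see the bucket replenishment code
 * there), deferring that work out of the interrupt-time reply handler.
 */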
/* Functions to handle message failures and transaction errors:
==============================================================*/

/*
 * i2o_lan_handle_failure(): Fail bit has been set since IOP's message
 * layer cannot deliver the request to the target, or the target cannot
 * process the request.
 */
static void i2o_lan_handle_failure(struct net_device *dev, u32 *msg)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;

	u32 *preserved_msg = (u32 *)(iop->mem_offset + msg[7]);
	u32 *sgl_elem = &preserved_msg[4];
	struct sk_buff *skb = NULL;
	u8 le_flag;

	i2o_report_status(KERN_INFO, dev->name, msg);

	/* If PacketSend failed, free sk_buffs reserved by upper layers */

	if (msg[1] >> 24 == LAN_PACKET_SEND) {
		do {
			skb = (struct sk_buff *)(sgl_elem[1]);
			dev_kfree_skb_irq(skb);

			atomic_dec(&priv->tx_out);

			le_flag = *sgl_elem >> 31;
			sgl_elem += 3;
		} while (le_flag == 0);		/* Last element flag not set */

		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}

	/* If ReceivePost failed, free sk_buffs we have reserved */

	if (msg[1] >> 24 == LAN_RECEIVE_POST) {
		do {
			skb = (struct sk_buff *)(sgl_elem[1]);
			dev_kfree_skb_irq(skb);

			atomic_dec(&priv->buckets_out);

			le_flag = *sgl_elem >> 31;
			sgl_elem += 3;
		} while (le_flag == 0);		/* Last element flag not set */
	}

	/* Release the preserved msg frame by resubmitting it as a NOP */

	preserved_msg[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0;
	preserved_msg[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0;
	preserved_msg[2] = 0;
	i2o_post_message(iop, msg[7]);
}
/*
 * i2o_lan_handle_transaction_error(): IOP or DDM has rejected the request
 * for general cause (format error, bad function code, insufficient resources,
 * etc.). We get one transaction_error for each failed transaction.
 */
static void i2o_lan_handle_transaction_error(struct net_device *dev, u32 *msg)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct sk_buff *skb;

	i2o_report_status(KERN_INFO, dev->name, msg);

	/* If PacketSend was rejected, free sk_buff reserved by upper layers */

	if (msg[1] >> 24 == LAN_PACKET_SEND) {
		skb = (struct sk_buff *)(msg[3]);	// TransactionContext
		dev_kfree_skb_irq(skb);
		atomic_dec(&priv->tx_out);

		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}

	/* If ReceivePost was rejected, free sk_buff we have reserved */

	if (msg[1] >> 24 == LAN_RECEIVE_POST) {
		skb = (struct sk_buff *)(msg[3]);
		dev_kfree_skb_irq(skb);
		atomic_dec(&priv->buckets_out);
	}
}
/*
 * i2o_lan_handle_status(): Common parts of handling a request that did
 * not succeed (status != SUCCESS).
 */
static int i2o_lan_handle_status(struct net_device *dev, u32 *msg)
{
	/* Fail bit set? */

	if (msg[0] & MSG_FAIL) {
		i2o_lan_handle_failure(dev, msg);
		return -1;
	}

	/* Message rejected for general cause? */

	if ((msg[4]>>24) == I2O_REPLY_STATUS_TRANSACTION_ERROR) {
		i2o_lan_handle_transaction_error(dev, msg);
		return -1;
	}

	/* Else have to handle it in the callback function */

	return 0;
}
/* Callback functions called from the interrupt routine:
=======================================================*/

/*
 * i2o_lan_send_post_reply(): Callback function to handle PacketSend replies.
 */
static void i2o_lan_send_post_reply(struct i2o_handler *h,
			struct i2o_controller *iop, struct i2o_message *m)
{
	u32 *msg = (u32 *)m;
	u8 unit = (u8)(msg[2]>>16);	// InitiatorContext
	struct net_device *dev = i2o_landevs[unit];
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	u8 trl_count = msg[3] & 0x000000FF;

	if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
		if (i2o_lan_handle_status(dev, msg))
			return;
	}

#ifdef DRIVERDEBUG
	i2o_report_status(KERN_INFO, dev->name, msg);
#endif

	/* The DDM has handled the transmit request(s); free the sk_buffs.
	 * A similar single-transaction reply also arrives in error cases
	 * (except on message failure or transaction error).
	 */
	while (trl_count) {
		dev_kfree_skb_irq((struct sk_buff *)msg[4 + trl_count]);
		dprintk(KERN_INFO "%s: tx skb freed (trl_count=%d).\n",
			dev->name, trl_count);
		atomic_dec(&priv->tx_out);
		trl_count--;
	}

	/* If priv->tx_out had reached tx_max_out, the queue was stopped */

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
}
/*
 * i2o_lan_receive_post_reply(): Callback function to process incoming packets.
 */
static void i2o_lan_receive_post_reply(struct i2o_handler *h,
			struct i2o_controller *iop, struct i2o_message *m)
{
	u32 *msg = (u32 *)m;
	u8 unit = (u8)(msg[2]>>16);	// InitiatorContext
	struct net_device *dev = i2o_landevs[unit];

	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_bucket_descriptor *bucket = (struct i2o_bucket_descriptor *)&msg[6];
	struct i2o_packet_info *packet;
	u8 trl_count = msg[3] & 0x000000FF;
	struct sk_buff *skb, *old_skb;
	unsigned long flags = 0;

	if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
		if (i2o_lan_handle_status(dev, msg))
			return;

		i2o_lan_release_buckets(dev, msg);
		return;
	}

#ifdef DRIVERDEBUG
	i2o_report_status(KERN_INFO, dev->name, msg);
#endif

	/* Else we are receiving incoming post. */

	while (trl_count--) {
		skb = (struct sk_buff *)bucket->context;
		packet = (struct i2o_packet_info *)bucket->packet_info;
		atomic_dec(&priv->buckets_out);

		/* Sanity checks: Any weird characteristics in bucket? */

		if ((packet->flags & 0x0f) || !(packet->flags & 0x40)) {
			if (packet->flags & 0x01)
				printk(KERN_WARNING "%s: packet with errors, error code=0x%02x.\n",
					dev->name, packet->status & 0xff);

			/* The following shouldn't happen, unless parameters in
			 * LAN_OPERATION group are changed during the run time.
			 */
			if (packet->flags & 0x0c)
				printk(KERN_DEBUG "%s: multi-bucket packets not supported!\n",
					dev->name);

			if (!(packet->flags & 0x40))
				printk(KERN_DEBUG "%s: multiple packets in a bucket not supported!\n",
					dev->name);

			dev_kfree_skb_irq(skb);

			bucket++;
			continue;
		}

		/* Copy short packet to a new skb */

		if (packet->len < priv->rx_copybreak) {
			old_skb = skb;
			skb = (struct sk_buff *)dev_alloc_skb(packet->len + 2);
			if (skb == NULL) {
				printk(KERN_ERR "%s: Can't allocate skb.\n", dev->name);
				return;
			}
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, packet->len), old_skb->data, packet->len);

			spin_lock_irqsave(&priv->fbl_lock, flags);
			if (priv->i2o_fbl_tail < I2O_LAN_MAX_BUCKETS_OUT)
				priv->i2o_fbl[++priv->i2o_fbl_tail] = old_skb;
			else
				dev_kfree_skb_irq(old_skb);

			spin_unlock_irqrestore(&priv->fbl_lock, flags);
		} else
			skb_put(skb, packet->len);

		/* Deliver to upper layers */

		skb->dev = dev;
		skb->protocol = priv->type_trans(skb, dev);
		netif_rx(skb);

		dev->last_rx = jiffies;

		dprintk(KERN_INFO "%s: Incoming packet (%d bytes) delivered "
			"to upper level.\n", dev->name, packet->len);

		bucket++;	// to next Packet Descriptor Block
	}

#ifdef DRIVERDEBUG
	if (msg[5] == 0)
		printk(KERN_INFO "%s: DDM out of buckets (priv->count = %d)!\n",
			dev->name, atomic_read(&priv->buckets_out));
#endif
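
	/* Replenishment below is threshold-driven. As an illustration (example
	 * values, not necessarily the defaults): with max_buckets_out = 64 and
	 * bucket_thresh = 8, new buckets are posted as soon as buckets_out
	 * drops to 56 or fewer, i.e. once the DDM has consumed at least
	 * bucket_thresh buckets since the last post.
	 */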
	/* If DDM has already consumed bucket_thresh buckets, post new ones */

	if (atomic_read(&priv->buckets_out) <= priv->max_buckets_out - priv->bucket_thresh) {
		run_i2o_post_buckets_task.data = (void *)dev;
		queue_task(&run_i2o_post_buckets_task, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
	}
	return;
}
/*
 * i2o_lan_reply(): Callback function to handle other incoming messages
 * except PacketSend and ReceivePost.
 */
static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop,
			  struct i2o_message *m)
{
	u32 *msg = (u32 *)m;
	u8 unit = (u8)(msg[2]>>16);	// InitiatorContext
	struct net_device *dev = i2o_landevs[unit];

	if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
		if (i2o_lan_handle_status(dev, msg))
			return;

		/* In other error cases just report and continue */

		i2o_report_status(KERN_INFO, dev->name, msg);
	}

#ifdef DRIVERDEBUG
	i2o_report_status(KERN_INFO, dev->name, msg);
#endif
	switch (msg[1] >> 24) {
	case LAN_RESET:
	case LAN_SUSPEND:
		/* default reply without payload */
		break;

	case I2O_CMD_UTIL_EVT_REGISTER:
	case I2O_CMD_UTIL_EVT_ACK:
		i2o_lan_handle_event(dev, msg);
		break;

	case I2O_CMD_UTIL_PARAMS_SET:
		/* default reply, results in ReplyPayload (not examined) */
		switch (msg[3] >> 16) {
		case 1: dprintk(KERN_INFO "%s: Reply to set MAC filter mask.\n",
				dev->name);
			break;
		case 2: dprintk(KERN_INFO "%s: Reply to set MAC table.\n",
				dev->name);
			break;
		default: printk(KERN_WARNING "%s: Bad group 0x%04X\n",
				dev->name, msg[3] >> 16);
		}
		break;

	default:
		printk(KERN_ERR "%s: No handler for the reply.\n",
		       dev->name);
		i2o_report_status(KERN_INFO, dev->name, msg);
	}
}
/* Functions used by the above callback functions:
=================================================*/

/*
 * i2o_lan_release_buckets(): Free unused buckets (sk_buffs).
 */
static void i2o_lan_release_buckets(struct net_device *dev, u32 *msg)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	u8 trl_elem_size = (u8)(msg[3]>>8 & 0x000000FF);
	u8 trl_count = (u8)(msg[3] & 0x000000FF);
	u32 *pskb = &msg[6];

	while (trl_count--) {
		dprintk(KERN_DEBUG "%s: Releasing unused rx skb %p (trl_count=%d).\n",
			dev->name, (struct sk_buff *)(*pskb), trl_count+1);
		dev_kfree_skb_irq((struct sk_buff *)(*pskb));
		pskb += 1 + trl_elem_size;
		atomic_dec(&priv->buckets_out);
	}
}
/*
 * i2o_lan_handle_event(): Handle events.
 */
static void i2o_lan_handle_event(struct net_device *dev, u32 *msg)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 max_evt_data_size = iop->status_block->inbound_frame_size - 5;
	struct i2o_reply {
		u32 header[4];
		u32 evt_indicator;
		u32 data[max_evt_data_size];
	} *evt = (struct i2o_reply *)msg;
	int evt_data_len = ((msg[0]>>16) - 5) * 4;	/* real size */

	printk(KERN_INFO "%s: I2O event - ", dev->name);

	if (msg[1]>>24 == I2O_CMD_UTIL_EVT_ACK) {
		printk("Event acknowledgement reply.\n");
		return;
	}

	/* Else evt->function == I2O_CMD_UTIL_EVT_REGISTER */

	switch (evt->evt_indicator) {
	case I2O_EVT_IND_STATE_CHANGE: {
		struct state_data {
			u16 status;
			u8 state;
			u8 data;
		} *evt_data = (struct state_data *)(evt->data[0]);

		printk("State change 0x%08x.\n", evt->data[0]);

		/* If the DDM is in error state, recovery may be
		 * possible if status = Transmit or Receive Control
		 * Unit Inoperable.
		 */
		if (evt_data->state == 0x05 && evt_data->status == 0x0003)
			i2o_lan_reset(dev);
		break;
	}

	case I2O_EVT_IND_FIELD_MODIFIED: {
		u16 *work16 = (u16 *)evt->data;
		printk("Group 0x%04x, field %d changed.\n", work16[0], work16[1]);
		break;
	}

	case I2O_EVT_IND_VENDOR_EVT: {
		int i;
		printk("Vendor event:\n");
		for (i = 0; i < evt_data_len / 4; i++)
			printk("   0x%08x\n", evt->data[i]);
		break;
	}

	case I2O_EVT_IND_DEVICE_RESET:
		/* Spec 2.0 p. 6-121:
		 * The _DEVICE_RESET event should also be acknowledged.
		 */
		printk("Device reset.\n");
		if (i2o_event_ack(iop, msg) < 0)
			printk("%s: Event Acknowledge timeout.\n", dev->name);
		break;

#if 0
	case I2O_EVT_IND_EVT_MASK_MODIFIED:
		printk("Event mask modified, 0x%08x.\n", evt->data[0]);
		break;

	case I2O_EVT_IND_GENERAL_WARNING:
		printk("General warning 0x%04x.\n", evt->data[0]);
		break;

	case I2O_EVT_IND_CONFIGURATION_FLAG:
		printk("Configuration requested.\n");
		break;

	case I2O_EVT_IND_CAPABILITY_CHANGE:
		printk("Capability change 0x%04x.\n", evt->data[0]);
		break;

	case I2O_EVT_IND_DEVICE_STATE:
		printk("Device state changed 0x%08x.\n", evt->data[0]);
		break;
#endif
	case I2O_LAN_EVT_LINK_DOWN:
		netif_carrier_off(dev);
		printk("Link to the physical device is lost.\n");
		break;

	case I2O_LAN_EVT_LINK_UP:
		netif_carrier_on(dev);
		printk("Link to the physical device is (re)established.\n");
		break;

	case I2O_LAN_EVT_MEDIA_CHANGE:
		printk("Media change.\n");
		break;
	default:
		printk("0x%08x. No handler.\n", evt->evt_indicator);
	}
}
/*
 * i2o_lan_receive_post(): Post buckets to receive packets.
 */
static int i2o_lan_receive_post(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	struct sk_buff *skb;
	u32 m, *msg;
	u32 bucket_len = (dev->mtu + dev->hard_header_len);
	u32 total = priv->max_buckets_out - atomic_read(&priv->buckets_out);
	u32 bucket_count;
	u32 *sgl_elem;
	unsigned long flags;

	/* Send (total / bucket_count) separate I2O requests */

	while (total) {
		m = I2O_POST_READ32(iop);
		if (m == 0xFFFFFFFF)
			return -ETIMEDOUT;
		msg = (u32 *)(iop->mem_offset + m);

		bucket_count = (total >= priv->sgl_max) ? priv->sgl_max : total;
		total -= bucket_count;
		atomic_add(bucket_count, &priv->buckets_out);

		dprintk(KERN_INFO "%s: Sending %d buckets (size %d) to LAN DDM.\n",
			dev->name, bucket_count, bucket_len);

		/* Fill in the header */

		__raw_writel(I2O_MESSAGE_SIZE(4 + 3 * bucket_count) | SGL_OFFSET_4, msg);
		__raw_writel(LAN_RECEIVE_POST<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
		__raw_writel(priv->unit << 16 | lan_receive_context, msg+2);
		__raw_writel(bucket_count, msg+3);
		sgl_elem = &msg[4];
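
		/* Each simple SGL element written below is three words:
		 *   word 0: flag bits (0x51000000 here) | bucket length
		 *   word 1: TransactionContext, the skb pointer echoed back
		 *           in the ReceivePost reply
		 *   word 2: bus address of the skb data buffer
		 */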
		/* Fill in the payload - contains bucket_count SGL elements */

		while (bucket_count--) {
			spin_lock_irqsave(&priv->fbl_lock, flags);
			if (priv->i2o_fbl_tail >= 0)
				skb = priv->i2o_fbl[priv->i2o_fbl_tail--];
			else {
				skb = dev_alloc_skb(bucket_len + 2);
				if (skb == NULL) {
					spin_unlock_irqrestore(&priv->fbl_lock, flags);
					return -ENOMEM;
				}
				skb_reserve(skb, 2);
			}
			spin_unlock_irqrestore(&priv->fbl_lock, flags);

			__raw_writel(0x51000000 | bucket_len, sgl_elem);
			__raw_writel((u32)skb, sgl_elem+1);
			__raw_writel(virt_to_bus(skb->data), sgl_elem+2);
			sgl_elem += 3;
		}

		/* set LE flag and post */
		__raw_writel(__raw_readl(sgl_elem-3) | 0x80000000, (sgl_elem-3));
		i2o_post_message(iop, m);
	}

	return 0;
}
/* Functions called from the network stack, and functions called by them:
========================================================================*/

/*
 * i2o_lan_reset(): Reset the LAN adapter into the operational state and
 * restore it to full operation.
 */
static int i2o_lan_reset(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 msg[5];

	dprintk(KERN_INFO "%s: LAN RESET MESSAGE.\n", dev->name);
	msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = LAN_RESET<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid;
	msg[2] = priv->unit << 16 | lan_context;	// InitiatorContext
	msg[3] = 0;					// TransactionContext
	msg[4] = 0;					// Keep posted buckets

	if (i2o_post_this(iop, msg, sizeof(msg)) < 0)
		return -ETIMEDOUT;

	return 0;
}
/*
 * i2o_lan_suspend(): Put LAN adapter into a safe, non-active state.
 * IOP replies to any LAN class message with status error_no_data_transfer
 * / suspended.
 */
static int i2o_lan_suspend(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 msg[5];

	dprintk(KERN_INFO "%s: LAN SUSPEND MESSAGE.\n", dev->name);
	msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = LAN_SUSPEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid;
	msg[2] = priv->unit << 16 | lan_context;	// InitiatorContext
	msg[3] = 0;					// TransactionContext
	msg[4] = 1 << 16;				// return posted buckets

	if (i2o_post_this(iop, msg, sizeof(msg)) < 0)
		return -ETIMEDOUT;

	return 0;
}
/*
 * i2o_set_ddm_parameters():
 * These settings are done to ensure proper initial values for the DDM.
 * They can be changed via the /proc file system or via a configuration utility.
 */
static void i2o_set_ddm_parameters(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 val;

	/*
	 * When PacketOrphanLimit is set to the maximum packet length,
	 * the packets will never be split into two separate buckets
	 */
	val = dev->mtu + dev->hard_header_len;
	if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0004, 2, &val, sizeof(val)) < 0)
		printk(KERN_WARNING "%s: Unable to set PacketOrphanLimit.\n",
		       dev->name);
	else
		dprintk(KERN_INFO "%s: PacketOrphanLimit set to %d.\n",
			dev->name, val);

	/* When RxMaxPacketsBucket = 1, the DDM puts only one packet into a bucket */

	val = 1;
	if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0008, 4, &val, sizeof(val)) < 0)
		printk(KERN_WARNING "%s: Unable to set RxMaxPacketsBucket.\n",
		       dev->name);
	else
		dprintk(KERN_INFO "%s: RxMaxPacketsBucket set to %d.\n",
			dev->name, val);
	return;
}
/* Functions called from the network stack:
==========================================*/

/*
 * i2o_lan_open(): Open the device to send/receive packets via
 * the network device.
 */
static int i2o_lan_open(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 mc_addr_group[64];

	MOD_INC_USE_COUNT;

	if (i2o_claim_device(i2o_dev, &i2o_lan_handler)) {
		printk(KERN_WARNING "%s: Unable to claim the I2O LAN device.\n", dev->name);
		MOD_DEC_USE_COUNT;
		return -EAGAIN;
	}
	dprintk(KERN_INFO "%s: I2O LAN device (tid=%d) claimed by LAN OSM.\n",
		dev->name, i2o_dev->lct_data.tid);

	if (i2o_event_register(iop, i2o_dev->lct_data.tid,
			       priv->unit << 16 | lan_context, 0, priv->i2o_event_mask) < 0)
		printk(KERN_WARNING "%s: Unable to set the event mask.\n", dev->name);

	i2o_lan_reset(dev);

	/* Get the max number of multicast addresses */

	if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0001, -1,
			     &mc_addr_group, sizeof(mc_addr_group)) < 0) {
		printk(KERN_WARNING "%s: Unable to query LAN_MAC_ADDRESS group.\n", dev->name);
		MOD_DEC_USE_COUNT;
		return -EAGAIN;
	}
	priv->max_size_mc_table = mc_addr_group[8];

	/* Allocate space for the free bucket list, used to reuse ReceivePost buckets */

	priv->i2o_fbl = kmalloc(priv->max_buckets_out * sizeof(struct sk_buff *),
				GFP_KERNEL);
	if (priv->i2o_fbl == NULL) {
		MOD_DEC_USE_COUNT;
		return -ENOMEM;
	}
	priv->i2o_fbl_tail = -1;
	priv->send_active = 0;

	i2o_set_ddm_parameters(dev);
	i2o_lan_receive_post(dev);

	netif_start_queue(dev);

	return 0;
}
/*
 * i2o_lan_close(): End the transferring.
 */
static int i2o_lan_close(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	int ret = 0;

	netif_stop_queue(dev);
	i2o_lan_suspend(dev);

	if (i2o_event_register(iop, i2o_dev->lct_data.tid,
			       priv->unit << 16 | lan_context, 0, 0) < 0)
		printk(KERN_WARNING "%s: Unable to clear the event mask.\n",
		       dev->name);

	while (priv->i2o_fbl_tail >= 0)
		dev_kfree_skb(priv->i2o_fbl[priv->i2o_fbl_tail--]);

	kfree(priv->i2o_fbl);

	if (i2o_release_device(i2o_dev, &i2o_lan_handler)) {
		printk(KERN_WARNING "%s: Unable to unclaim I2O LAN device "
		       "(tid=%d).\n", dev->name, i2o_dev->lct_data.tid);
		ret = -EBUSY;
	}

	MOD_DEC_USE_COUNT;

	return ret;
}
/*
 * i2o_lan_tx_timeout(): Tx timeout handler.
 */
static void i2o_lan_tx_timeout(struct net_device *dev)
{
	if (!netif_queue_stopped(dev))
		netif_start_queue(dev);
}

/*
 * i2o_lan_batch_send(): Send packets in batch.
 * Both i2o_lan_sdu_send and i2o_lan_packet_send use this.
 */
static void i2o_lan_batch_send(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_controller *iop = priv->i2o_dev->controller;

	spin_lock_irq(&priv->tx_lock);
	if (priv->tx_count != 0) {
		dev->trans_start = jiffies;
		i2o_post_message(iop, priv->m);
		dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
		priv->tx_count = 0;
	}
	priv->send_active = 0;
	spin_unlock_irq(&priv->tx_lock);
	MOD_DEC_USE_COUNT;
}
#ifdef CONFIG_NET_FC
/*
 * i2o_lan_sdu_send(): Send a packet, MAC header added by the DDM.
 * Must be supported by Fibre Channel, optional for Ethernet/802.3,
 * Token Ring, FDDI
 */
static int i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	int tickssofar = jiffies - dev->trans_start;
	u32 m, *msg;
	u32 *sgl_elem;

	spin_lock_irq(&priv->tx_lock);

	priv->tx_count++;
	atomic_inc(&priv->tx_out);

	/*
	 * If tx_batch_mode = 0x00, forced to immediate mode
	 * If tx_batch_mode = 0x01, forced to batch mode
	 * If tx_batch_mode = 0x02, switch automatically, current mode immediate
	 * If tx_batch_mode = 0x03, switch automatically, current mode batch
	 * If the gap between two packets is > 0 ticks, switch to immediate
	 */
	if (priv->tx_batch_mode >> 1)		// switch automatically
		priv->tx_batch_mode = tickssofar ? 0x02 : 0x03;

	if (priv->tx_count == 1) {
		m = I2O_POST_READ32(iop);
		if (m == 0xFFFFFFFF) {
			spin_unlock_irq(&priv->tx_lock);
			return 1;
		}
		msg = (u32 *)(iop->mem_offset + m);
		priv->m = m;

		__raw_writel(NINE_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
		__raw_writel(priv->unit << 16 | lan_send_context, msg+2);	// InitiatorContext
		__raw_writel(1 << 30 | 1 << 3, msg+3);				// TransmitControlWord

		__raw_writel(0xD7000000 | skb->len, msg+4);			// MAC hdr included
		__raw_writel((u32)skb, msg+5);					// TransactionContext
		__raw_writel(virt_to_bus(skb->data), msg+6);
		__raw_writel((u32)skb->mac.raw, msg+7);
		__raw_writel((u32)skb->mac.raw+4, msg+8);

		if ((priv->tx_batch_mode & 0x01) && !priv->send_active) {
			priv->send_active = 1;
			MOD_INC_USE_COUNT;
			if (schedule_task(&priv->i2o_batch_send_task) == 0)
				MOD_DEC_USE_COUNT;
		}
	} else {	/* Add a new SGL element to the previous message frame */

		msg = (u32 *)(iop->mem_offset + priv->m);
		/* the tx_count-th five-word element; the first sits at msg[4] */
		sgl_elem = &msg[priv->tx_count * 5 - 1];

		__raw_writel(I2O_MESSAGE_SIZE((__raw_readl(msg)>>16) + 5) | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(__raw_readl(sgl_elem-5) & 0x7FFFFFFF, sgl_elem-5);	/* clear LE flag */
		__raw_writel(0xD5000000 | skb->len, sgl_elem);
		__raw_writel((u32)skb, sgl_elem+1);
		__raw_writel(virt_to_bus(skb->data), sgl_elem+2);
		__raw_writel((u32)(skb->mac.raw), sgl_elem+3);
		__raw_writel((u32)(skb->mac.raw)+1, sgl_elem+4);
	}

	/* If tx is not in batch mode or the frame is full, send immediately */

	if (!(priv->tx_batch_mode & 0x01) || priv->tx_count == priv->sgl_max) {
		dev->trans_start = jiffies;
		i2o_post_message(iop, priv->m);
		dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
		priv->tx_count = 0;
	}

	/* If the DDM's TxMaxPktOut is reached, stop the queueing layer from sending more */

	if (atomic_read(&priv->tx_out) >= priv->tx_max_out)
		netif_stop_queue(dev);

	spin_unlock_irq(&priv->tx_lock);
	return 0;
}
#endif /* CONFIG_NET_FC */
/*
 * i2o_lan_packet_send(): Send a packet as is, including the MAC header.
 *
 * Must be supported by Ethernet/802.3, Token Ring, FDDI, optional for
 * Fibre Channel
 */
static int i2o_lan_packet_send(struct sk_buff *skb, struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	int tickssofar = jiffies - dev->trans_start;
	u32 m, *msg;
	u32 *sgl_elem;

	spin_lock_irq(&priv->tx_lock);

	priv->tx_count++;
	atomic_inc(&priv->tx_out);

	/*
	 * If tx_batch_mode = 0x00, forced to immediate mode
	 * If tx_batch_mode = 0x01, forced to batch mode
	 * If tx_batch_mode = 0x02, switch automatically, current mode immediate
	 * If tx_batch_mode = 0x03, switch automatically, current mode batch
	 * If the gap between two packets is > 0 ticks, switch to immediate
	 */
	if (priv->tx_batch_mode >> 1)		// switch automatically
		priv->tx_batch_mode = tickssofar ? 0x02 : 0x03;
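
	/* Illustrative timeline for the automatic modes: under back-to-back
	 * transmits tickssofar == 0, so the mode stays at 0x03 (batch) and
	 * SGL elements accumulate in one message frame; after an idle gap the
	 * next packet sees tickssofar > 0 and falls back to 0x02 (immediate),
	 * posting each frame as soon as it is built.
	 */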
	if (priv->tx_count == 1) {
		m = I2O_POST_READ32(iop);
		if (m == 0xFFFFFFFF) {
			spin_unlock_irq(&priv->tx_lock);
			return 1;
		}
		msg = (u32 *)(iop->mem_offset + m);
		priv->m = m;

		__raw_writel(SEVEN_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
		__raw_writel(priv->unit << 16 | lan_send_context, msg+2);	// InitiatorContext
		__raw_writel(1 << 30 | 1 << 3, msg+3);				// TransmitControlWord
			// bit 30: reply as soon as transmission attempt is complete
			// bit 3: suppress CRC generation
		__raw_writel(0xD5000000 | skb->len, msg+4);			// MAC hdr included
		__raw_writel((u32)skb, msg+5);					// TransactionContext
		__raw_writel(virt_to_bus(skb->data), msg+6);

		if ((priv->tx_batch_mode & 0x01) && !priv->send_active) {
			priv->send_active = 1;
			MOD_INC_USE_COUNT;
			if (schedule_task(&priv->i2o_batch_send_task) == 0)
				MOD_DEC_USE_COUNT;
		}
	} else {	/* Add a new SGL element to the previous message frame */

		msg = (u32 *)(iop->mem_offset + priv->m);
		sgl_elem = &msg[priv->tx_count * 3 + 1];

		__raw_writel(I2O_MESSAGE_SIZE((__raw_readl(msg)>>16) + 3) | 1<<12 | SGL_OFFSET_4, msg);
		__raw_writel(__raw_readl(sgl_elem-3) & 0x7FFFFFFF, sgl_elem-3);	/* clear LE flag */
		__raw_writel(0xD5000000 | skb->len, sgl_elem);
		__raw_writel((u32)skb, sgl_elem+1);
		__raw_writel(virt_to_bus(skb->data), sgl_elem+2);
	}

	/* If tx is in immediate mode or the frame is full, send now */

	if (!(priv->tx_batch_mode & 0x01) || priv->tx_count == priv->sgl_max) {
		dev->trans_start = jiffies;
		i2o_post_message(iop, priv->m);
		dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
		priv->tx_count = 0;
	}

	/* If the DDM's TxMaxPktOut is reached, stop the queueing layer from sending more */

	if (atomic_read(&priv->tx_out) >= priv->tx_max_out)
		netif_stop_queue(dev);

	spin_unlock_irq(&priv->tx_lock);
	return 0;
}
/*
 * i2o_lan_get_stats(): Fill in the statistics.
 */
static struct net_device_stats *i2o_lan_get_stats(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u64 val64[16];
	u64 supported_group[4] = { 0, 0, 0, 0 };

	if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0100, -1, val64,
			     sizeof(val64)) < 0)
		printk(KERN_INFO "%s: Unable to query LAN_HISTORICAL_STATS.\n", dev->name);
	else {
		dprintk(KERN_DEBUG "%s: LAN_HISTORICAL_STATS queried.\n", dev->name);
		priv->stats.tx_packets = val64[0];
		priv->stats.tx_bytes = val64[1];
		priv->stats.rx_packets = val64[2];
		priv->stats.rx_bytes = val64[3];
		priv->stats.tx_errors = val64[4];
		priv->stats.rx_errors = val64[5];
		priv->stats.rx_dropped = val64[6];
	}

	if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0180, -1,
			     &supported_group, sizeof(supported_group)) < 0)
		printk(KERN_INFO "%s: Unable to query LAN_SUPPORTED_OPTIONAL_HISTORICAL_STATS.\n", dev->name);

	if (supported_group[2]) {
		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0183, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_OPTIONAL_RX_HISTORICAL_STATS.\n", dev->name);
		else {
			dprintk(KERN_DEBUG "%s: LAN_OPTIONAL_RX_HISTORICAL_STATS queried.\n", dev->name);
			priv->stats.multicast = val64[4];
			priv->stats.rx_length_errors = val64[10];
			priv->stats.rx_crc_errors = val64[0];
		}
	}

	if (i2o_dev->lct_data.sub_class == I2O_LAN_ETHERNET) {
		u64 supported_stats = 0;
		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0200, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_802_3_HISTORICAL_STATS.\n", dev->name);
		else {
			dprintk(KERN_DEBUG "%s: LAN_802_3_HISTORICAL_STATS queried.\n", dev->name);
			priv->stats.transmit_collision = val64[1] + val64[2];
			priv->stats.rx_frame_errors = val64[0];
			priv->stats.tx_carrier_errors = val64[6];
		}

		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0280, -1,
				     &supported_stats, sizeof(supported_stats)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_SUPPORTED_802_3_HISTORICAL_STATS.\n", dev->name);

		if (supported_stats != 0) {
			if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0281, -1,
					     val64, sizeof(val64)) < 0)
				printk(KERN_INFO "%s: Unable to query LAN_OPTIONAL_802_3_HISTORICAL_STATS.\n", dev->name);
			else {
				dprintk(KERN_DEBUG "%s: LAN_OPTIONAL_802_3_HISTORICAL_STATS queried.\n", dev->name);
				if (supported_stats & 0x1)
					priv->stats.rx_over_errors = val64[0];
				if (supported_stats & 0x4)
					priv->stats.tx_heartbeat_errors = val64[2];
			}
		}
	}

#ifdef CONFIG_TR
	if (i2o_dev->lct_data.sub_class == I2O_LAN_TR) {
		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0300, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_802_5_HISTORICAL_STATS.\n", dev->name);
		else {
			struct tr_statistics *stats =
				(struct tr_statistics *)&priv->stats;
			dprintk(KERN_DEBUG "%s: LAN_802_5_HISTORICAL_STATS queried.\n", dev->name);

			stats->line_errors = val64[0];
			stats->internal_errors = val64[7];
			stats->burst_errors = val64[4];
			stats->A_C_errors = val64[2];
			stats->abort_delimiters = val64[3];
			stats->lost_frames = val64[1];
			/* stats->recv_congest_count = ?;  FIXME ??*/
			stats->frame_copied_errors = val64[5];
			stats->frequency_errors = val64[6];
			stats->token_errors = val64[9];
		}
		/* Token Ring optional stats not yet defined */
	}
#endif

#ifdef CONFIG_FDDI
	if (i2o_dev->lct_data.sub_class == I2O_LAN_FDDI) {
		if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0400, -1,
				     val64, sizeof(val64)) < 0)
			printk(KERN_INFO "%s: Unable to query LAN_FDDI_HISTORICAL_STATS.\n", dev->name);
		else {
			dprintk(KERN_DEBUG "%s: LAN_FDDI_HISTORICAL_STATS queried.\n", dev->name);
			priv->stats.smt_cf_state = val64[0];
			memcpy(priv->stats.mac_upstream_nbr, &val64[1], FDDI_K_ALEN);
			memcpy(priv->stats.mac_downstream_nbr, &val64[2], FDDI_K_ALEN);
			priv->stats.mac_error_cts = val64[3];
			priv->stats.mac_lost_cts = val64[4];
			priv->stats.mac_rmt_state = val64[5];
			memcpy(priv->stats.port_lct_fail_cts, &val64[6], 8);
			memcpy(priv->stats.port_lem_reject_cts, &val64[7], 8);
			memcpy(priv->stats.port_lem_cts, &val64[8], 8);
			memcpy(priv->stats.port_pcm_state, &val64[9], 8);
		}
		/* FDDI optional stats not yet defined */
	}
#endif

#ifdef CONFIG_NET_FC
	/* Fibre Channel Statistics not yet defined in 1.53 nor 2.0 */
#endif

	return (struct net_device_stats *)&priv->stats;
}
/*
 * i2o_lan_set_mc_filter(): Post a request to set multicast filter.
 */
int i2o_lan_set_mc_filter(struct net_device *dev, u32 filter_mask)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	u32 msg[10];

	msg[0] = TEN_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = I2O_CMD_UTIL_PARAMS_SET << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid;
	msg[2] = priv->unit << 16 | lan_context;
	msg[3] = 0x0001 << 16 | 3;			// TransactionContext: group&field
	msg[4] = 0;
	msg[5] = 0xCC000000 | 16;			// Immediate data SGL
	msg[6] = 1;					// OperationCount
	msg[7] = 0x0001 << 16 | I2O_PARAMS_FIELD_SET;	// Group, Operation
	msg[8] = 3 << 16 | 1;				// FieldIndex, FieldCount
	msg[9] = filter_mask;				// Value

	return i2o_post_this(iop, msg, sizeof(msg));
}
/*
 * i2o_lan_set_mc_table(): Post a request to set LAN_MULTICAST_MAC_ADDRESS table.
 */
int i2o_lan_set_mc_table(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	struct i2o_controller *iop = i2o_dev->controller;
	struct dev_mc_list *mc;
	u32 msg[10 + 2 * dev->mc_count];
	u8 *work8 = (u8 *)(msg + 10);

	msg[0] = I2O_MESSAGE_SIZE(10 + 2 * dev->mc_count) | SGL_OFFSET_5;
	msg[1] = I2O_CMD_UTIL_PARAMS_SET << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid;
	msg[2] = priv->unit << 16 | lan_context;		// InitiatorContext
	msg[3] = 0x0002 << 16 | (u16)-1;			// TransactionContext
	msg[4] = 0;						// OperationFlags
	msg[5] = 0xCC000000 | (16 + 8 * dev->mc_count);		// Immediate data SGL
	msg[6] = 2;						// OperationCount
	msg[7] = 0x0002 << 16 | I2O_PARAMS_TABLE_CLEAR;		// Group, Operation
	msg[8] = 0x0002 << 16 | I2O_PARAMS_ROW_ADD;		// Group, Operation
	msg[9] = dev->mc_count << 16 | (u16)-1;			// RowCount, FieldCount

	for (mc = dev->mc_list; mc; mc = mc->next, work8 += 8) {
		memset(work8, 0, 8);
		memcpy(work8, mc->dmi_addr, mc->dmi_addrlen);	// Values
	}

	return i2o_post_this(iop, msg, sizeof(msg));
}
/*
 * i2o_lan_set_multicast_list(): Enable the network device to receive packets
 * not sent to the protocol address.
 */
static void i2o_lan_set_multicast_list(struct net_device *dev)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	u32 filter_mask;

	if (dev->flags & IFF_PROMISC) {
		filter_mask = 0x00000002;
		dprintk(KERN_INFO "%s: Enabling promiscuous mode...\n", dev->name);
	} else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > priv->max_size_mc_table) {
		filter_mask = 0x00000004;
		dprintk(KERN_INFO "%s: Enabling all multicast mode...\n", dev->name);
	} else if (dev->mc_count) {
		filter_mask = 0x00000000;
		dprintk(KERN_INFO "%s: Enabling multicast mode...\n", dev->name);
		if (i2o_lan_set_mc_table(dev) < 0)
			printk(KERN_WARNING "%s: Unable to send MAC table.\n", dev->name);
	} else {
		filter_mask = 0x00000300;	// Broadcast, Multicast disabled
		dprintk(KERN_INFO "%s: Enabling unicast mode...\n", dev->name);
	}

	/* Finally copy new FilterMask to DDM */

	if (i2o_lan_set_mc_filter(dev, filter_mask) < 0)
		printk(KERN_WARNING "%s: Unable to send MAC FilterMask.\n", dev->name);
}
/*
 * i2o_lan_change_mtu(): Change maximum transfer unit size.
 */
static int i2o_lan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
	struct i2o_device *i2o_dev = priv->i2o_dev;
	u32 max_pkt_size;

	if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
			     0x0000, 6, &max_pkt_size, 4) < 0)
		return -EFAULT;

	if (new_mtu < 68 || new_mtu > 9000 || new_mtu > max_pkt_size)
		return -EINVAL;

	dev->mtu = new_mtu;

	i2o_lan_suspend(dev);		// to SUSPENDED state, return buckets

	while (priv->i2o_fbl_tail >= 0)	// free buffered buckets
		dev_kfree_skb(priv->i2o_fbl[priv->i2o_fbl_tail--]);

	i2o_lan_reset(dev);		// to OPERATIONAL state
	i2o_set_ddm_parameters(dev);	// reset some parameters
	i2o_lan_receive_post(dev);	// post new buckets (new size)

	return 0;
}
/* Functions to initialize I2O LAN OSM:
======================================*/

/*
 * i2o_lan_register_device(): Register a LAN class device with the kernel.
 */
struct net_device *i2o_lan_register_device(struct i2o_device *i2o_dev)
{
	struct net_device *dev = NULL;
	struct i2o_lan_local *priv = NULL;
	u8 hw_addr[8];
	u32 tx_max_out = 0;
	unsigned short (*type_trans)(struct sk_buff *, struct net_device *);
	void (*unregister_dev)(struct net_device *dev);

	switch (i2o_dev->lct_data.sub_class) {
	case I2O_LAN_ETHERNET:
		dev = init_etherdev(NULL, sizeof(struct i2o_lan_local));
		if (dev == NULL)
			return NULL;
		type_trans = eth_type_trans;
		unregister_dev = unregister_netdev;
		break;

#ifdef CONFIG_ANYLAN
	case I2O_LAN_100VG:
		printk(KERN_ERR "i2o_lan: 100base VG not yet supported.\n");
		return NULL;
		break;
#endif

#ifdef CONFIG_TR
	case I2O_LAN_TR:
		dev = init_trdev(NULL, sizeof(struct i2o_lan_local));
		if (dev == NULL)
			return NULL;
		type_trans = tr_type_trans;
		unregister_dev = unregister_trdev;
		break;
#endif

#ifdef CONFIG_FDDI
	case I2O_LAN_FDDI:
	{
		int size = sizeof(struct net_device) + sizeof(struct i2o_lan_local);

		dev = (struct net_device *)kmalloc(size, GFP_KERNEL);
		if (dev == NULL)
			return NULL;
		memset((char *)dev, 0, size);
		dev->priv = (void *)(dev + 1);

		if (dev_alloc_name(dev, "fddi%d") < 0) {
			printk(KERN_WARNING "i2o_lan: Too many FDDI devices.\n");
			kfree(dev);
			return NULL;
		}
		type_trans = fddi_type_trans;
		unregister_dev = (void *)unregister_netdevice;

		fddi_setup(dev);
		register_netdev(dev);
	}
	break;
#endif

#ifdef CONFIG_NET_FC
	case I2O_LAN_FIBRE_CHANNEL:
		dev = init_fcdev(NULL, sizeof(struct i2o_lan_local));
		if (dev == NULL)
			return NULL;
		type_trans = NULL;
		/* FIXME: Move fc_type_trans() from drivers/net/fc/iph5526.c to net/802/fc.c
		 * and export it in include/linux/fcdevice.h
		 * type_trans = fc_type_trans;
		 */
		unregister_dev = (void *)unregister_fcdev;
		break;
#endif

	case I2O_LAN_UNKNOWN:
	default:
		printk(KERN_ERR "i2o_lan: LAN type 0x%04x not supported.\n",
		       i2o_dev->lct_data.sub_class);
		return NULL;
	}

	priv = (struct i2o_lan_local *)dev->priv;
	priv->i2o_dev = i2o_dev;
	priv->type_trans = type_trans;
	priv->sgl_max = (i2o_dev->controller->status_block->inbound_frame_size - 4) / 3;
	atomic_set(&priv->buckets_out, 0);

	/* Set default values for user configurable parameters */
	/* Private values are changed via the /proc file system */

	priv->max_buckets_out = max_buckets_out;
	priv->bucket_thresh   = bucket_thresh;
	priv->rx_copybreak    = rx_copybreak;
	priv->tx_batch_mode   = tx_batch_mode & 0x03;
	priv->i2o_event_mask  = i2o_event_mask;

	priv->tx_lock  = SPIN_LOCK_UNLOCKED;
	priv->fbl_lock = SPIN_LOCK_UNLOCKED;

	unit++;
	i2o_landevs[unit] = dev;
	priv->unit = unit;

	if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
			     0x0001, 0, &hw_addr, sizeof(hw_addr)) < 0) {
		printk(KERN_ERR "%s: Unable to query hardware address.\n", dev->name);
		unit--;
		unregister_dev(dev);
		kfree(dev);
		return NULL;
	}
	dprintk(KERN_DEBUG "%s: hwaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
		dev->name, hw_addr[0], hw_addr[1], hw_addr[2], hw_addr[3],
		hw_addr[4], hw_addr[5]);

	dev->addr_len = 6;
	memcpy(dev->dev_addr, hw_addr, 6);

	if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
			     0x0007, 2, &tx_max_out, sizeof(tx_max_out)) < 0) {
		printk(KERN_ERR "%s: Unable to query max TX queue.\n", dev->name);
		unit--;
		unregister_dev(dev);
		kfree(dev);
		return NULL;
	}
	dprintk(KERN_INFO "%s: Max TX Outstanding = %d.\n", dev->name, tx_max_out);
	priv->tx_max_out = tx_max_out;
	atomic_set(&priv->tx_out, 0);
	priv->tx_count = 0;

	INIT_LIST_HEAD(&priv->i2o_batch_send_task.list);
	priv->i2o_batch_send_task.sync = 0;
	priv->i2o_batch_send_task.routine = (void *)i2o_lan_batch_send;
	priv->i2o_batch_send_task.data = (void *)dev;

	dev->open = i2o_lan_open;
	dev->stop = i2o_lan_close;
	dev->get_stats = i2o_lan_get_stats;
	dev->set_multicast_list = i2o_lan_set_multicast_list;
	dev->tx_timeout = i2o_lan_tx_timeout;
	dev->watchdog_timeo = I2O_LAN_TX_TIMEOUT;

#ifdef CONFIG_NET_FC
	if (i2o_dev->lct_data.sub_class == I2O_LAN_FIBRE_CHANNEL)
		dev->hard_start_xmit = i2o_lan_sdu_send;
	else
#endif
	dev->hard_start_xmit = i2o_lan_packet_send;

	if (i2o_dev->lct_data.sub_class == I2O_LAN_ETHERNET)
		dev->change_mtu = i2o_lan_change_mtu;

	return dev;
}
#ifdef MODULE
#define i2o_lan_init	init_module
#endif

int __init i2o_lan_init(void)
{
	struct net_device *dev;
	int i;

	printk(KERN_INFO "I2O LAN OSM (C) 1999 University of Helsinki.\n");

	/* Module params are used as global defaults for private values */

	if (max_buckets_out > I2O_LAN_MAX_BUCKETS_OUT)
		max_buckets_out = I2O_LAN_MAX_BUCKETS_OUT;
	if (bucket_thresh > max_buckets_out)
		bucket_thresh = max_buckets_out;

	/* Install handlers for incoming replies */

	if (i2o_install_handler(&i2o_lan_send_handler) < 0) {
		printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM.\n");
		return -EINVAL;
	}
	lan_send_context = i2o_lan_send_handler.context;

	if (i2o_install_handler(&i2o_lan_receive_handler) < 0) {
		printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM.\n");
		return -EINVAL;
	}
	lan_receive_context = i2o_lan_receive_handler.context;

	if (i2o_install_handler(&i2o_lan_handler) < 0) {
		printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM.\n");
		return -EINVAL;
	}
	lan_context = i2o_lan_handler.context;

	for (i = 0; i <= MAX_LAN_CARDS; i++)
		i2o_landevs[i] = NULL;

	for (i = 0; i < MAX_I2O_CONTROLLERS; i++) {
		struct i2o_controller *iop = i2o_find_controller(i);
		struct i2o_device *i2o_dev;

		if (iop == NULL)
			continue;

		for (i2o_dev = iop->devices; i2o_dev != NULL; i2o_dev = i2o_dev->next) {

			if (i2o_dev->lct_data.class_id != I2O_CLASS_LAN)
				continue;

			/* Make sure device not already claimed by an ISM */
			if (i2o_dev->lct_data.user_tid != 0xFFF)
				continue;

			if (unit == MAX_LAN_CARDS) {
				i2o_unlock_controller(iop);
				printk(KERN_WARNING "i2o_lan: Too many I2O LAN devices.\n");
				return -EINVAL;
			}

			dev = i2o_lan_register_device(i2o_dev);
			if (dev == NULL) {
				printk(KERN_ERR "i2o_lan: Unable to register I2O LAN device 0x%04x.\n",
				       i2o_dev->lct_data.sub_class);
				continue;
			}

			printk(KERN_INFO "%s: I2O LAN device registered, "
				"subclass = 0x%04x, unit = %d, tid = %d.\n",
				dev->name, i2o_dev->lct_data.sub_class,
				((struct i2o_lan_local *)dev->priv)->unit,
				i2o_dev->lct_data.tid);
		}

		i2o_unlock_controller(iop);
	}

	dprintk(KERN_INFO "%d I2O LAN devices found and registered.\n", unit+1);

	return 0;
}
#ifdef MODULE

void cleanup_module(void)
{
	int i;

	for (i = 0; i <= unit; i++) {
		struct net_device *dev = i2o_landevs[i];
		struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
		struct i2o_device *i2o_dev = priv->i2o_dev;

		switch (i2o_dev->lct_data.sub_class) {
		case I2O_LAN_ETHERNET:
			unregister_netdev(dev);
			break;
#ifdef CONFIG_FDDI
		case I2O_LAN_FDDI:
			unregister_netdevice(dev);
			break;
#endif
#ifdef CONFIG_TR
		case I2O_LAN_TR:
			unregister_trdev(dev);
			break;
#endif
#ifdef CONFIG_NET_FC
		case I2O_LAN_FIBRE_CHANNEL:
			unregister_fcdev(dev);
			break;
#endif
		default:
			printk(KERN_WARNING "%s: Spurious I2O LAN subclass 0x%08x.\n",
			       dev->name, i2o_dev->lct_data.sub_class);
		}

		dprintk(KERN_INFO "%s: I2O LAN device unregistered.\n",
			dev->name);
		kfree(dev);
	}

	i2o_remove_handler(&i2o_lan_handler);
	i2o_remove_handler(&i2o_lan_send_handler);
	i2o_remove_handler(&i2o_lan_receive_handler);
}
EXPORT_NO_SYMBOLS;

MODULE_AUTHOR("University of Helsinki, Department of Computer Science");
MODULE_DESCRIPTION("I2O LAN OSM");

MODULE_PARM(max_buckets_out, "1-" __MODULE_STRING(I2O_LAN_MAX_BUCKETS_OUT) "i");
MODULE_PARM_DESC(max_buckets_out, "Total number of buckets to post (1-)");
MODULE_PARM(bucket_thresh, "1-" __MODULE_STRING(I2O_LAN_MAX_BUCKETS_OUT) "i");
MODULE_PARM_DESC(bucket_thresh, "Bucket post threshold (1-)");
MODULE_PARM(rx_copybreak, "1-" "i");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint: frames shorter than this are copied (1-)");
MODULE_PARM(tx_batch_mode, "0-2" "i");
MODULE_PARM_DESC(tx_batch_mode, "0=Send immediately, 1=Send in batches, 2=Switch automatically");

#endif