/**************************************************************************/
/*                                                                        */
/* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
/* Copyright (C) 2003 IBM Corp.                                           */
/*  Originally written by Dave Larson (larson1@us.ibm.com)                */
/*  Maintained by Santiago Leon (santil@us.ibm.com)                       */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program; if not, write to the Free Software           */
/*  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307   */
/*  USA                                                                   */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/
/*
  TODO:
  - remove frag processing code - no longer needed
  - add support for sysfs
  - possibly remove procfs support
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <asm/semaphore.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/iommu.h>
#include <asm/vio.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "ibmveth.h"
#define DEBUG 1

#define ibmveth_printk(fmt, args...) \
	printk(KERN_INFO "%s: " fmt, __FILE__, ## args)

#define ibmveth_error_printk(fmt, args...) \
	printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)

#ifdef DEBUG
#define ibmveth_debug_printk_no_adapter(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
#define ibmveth_debug_printk(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#define ibmveth_assert(expr) \
	if (!(expr)) { \
		printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
		BUG(); \
	}
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
#define ibmveth_assert(expr)
#endif
static int ibmveth_open(struct net_device *dev);
static int ibmveth_close(struct net_device *dev);
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ibmveth_poll(struct net_device *dev, int *budget);
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
static void ibmveth_set_multicast_list(struct net_device *dev);
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
static void ibmveth_proc_register_driver(void);
static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *);
#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
/* simple methods of getting data from the current rxq entry */
static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}
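/*
 * Note on the accessors above: the receive queue is a ring shared with
 * the hypervisor, and entry ownership is tracked with a toggle bit
 * rather than head/tail pointers.  The driver flips its own copy in
 * adapter->rx_queue.toggle each time it wraps (see
 * ibmveth_rxq_recycle_buffer() below), so an entry is pending exactly
 * when the toggle written into it matches the driver's copy.
 */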
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size / 2;
}
/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map) {
		return -1;
	}

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kmalloc(sizeof(void *) * pool->size, GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->skbuff, 0, sizeof(void *) * pool->size);
	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i) {
		pool->free_map[i] = i;
	}

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
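/*
 * free_map bookkeeping: the map is used as a circular list of free slot
 * indices.  ibmveth_replenish_buffer_pool() takes indices at
 * consumer_index and marks the map entry IBM_VETH_INVALID_MAP while the
 * buffer is posted to the hypervisor; ibmveth_remove_buffer_from_pool()
 * returns indices at producer_index once the buffer comes back.
 */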
/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;

	mb();

	for (i = 0; i < count; ++i) {
		struct sk_buff *skb;
		unsigned int free_index, index;
		u64 correlator;
		union ibmveth_buf_desc desc;
		unsigned long lpar_rc;
		dma_addr_t dma_addr;

		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);

		if (!skb) {
			ibmveth_debug_printk("replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index++ % pool->size;
		index = pool->free_map[free_index];

		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
		ibmveth_assert(pool->skbuff[index] == NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.desc = 0;
		desc.fields.valid = 1;
		desc.fields.length = pool->buff_size;
		desc.fields.address = dma_addr;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

		if (lpar_rc != H_Success) {
			pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
			pool->skbuff[index] = NULL;
			pool->consumer_index--;
			dma_unmap_single(&adapter->vdev->dev,
					pool->dma_addr[index], pool->buff_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			adapter->replenish_add_buff_failure++;
			break;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
}
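/*
 * The correlator written into the first 8 bytes of each posted buffer
 * encodes where the skb lives: the pool index in the upper 32 bits and
 * the slot index in the lower 32 bits.  The hypervisor hands the value
 * back in the rx queue entry, and the receive path recovers the skb
 * with the mirror-image decode:
 *
 *	pool  = correlator >> 32;
 *	index = correlator & 0xffffffffUL;
 */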
/* check if replenishing is needed. */
static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
{
	return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
		(atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
		(atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
}
/* kick the replenish tasklet if we need replenishing and it isn't already running */
static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
{
	if (ibmveth_is_replenishing_needed(adapter) &&
	    (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
		schedule_work(&adapter->replenish_task);
	}
}
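/*
 * not_replenishing is a one-shot gate: it sits at 1 while the replenish
 * work is idle, so atomic_dec_if_positive() above returns 0 for exactly
 * one caller and goes negative (scheduling nothing) for everyone else.
 * ibmveth_replenish_task() re-arms the gate with atomic_inc() before it
 * checks whether another pass is needed.
 */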
/* replenish tasklet routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	adapter->replenish_task_cycles++;

	ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
	ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
	ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]);

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + 4096 - 8);

	atomic_inc(&adapter->not_replenishing);

	ibmveth_schedule_replenishing(adapter);
}
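/*
 * The firmware keeps a count of frames it could not deliver for lack of
 * receive buffers in the last 8 bytes of the 4K buffer list page
 * registered with h_register_logical_lan(); the read of
 * buffer_list_addr + 4096 - 8 above (and again in ibmveth_close())
 * snapshots that counter into the "no buffers" statistic.
 */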
/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	int i;

	if (pool->free_map) {
		kfree(pool->free_map);
		pool->free_map = NULL;
	}

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	ibmveth_assert(skb != NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}
/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}
/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	desc.desc = 0;
	desc.fields.valid = 1;
	desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_Success) {
		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
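/*
 * If the hypervisor refuses the recycled descriptor, the buffer is
 * pulled out of the pool entirely; the pool then drops below its
 * threshold and the next replenish cycle allocates a fresh skb for the
 * freed slot.
 */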
static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(adapter->buffer_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(adapter->filter_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		if (!dma_mapping_error(adapter->rx_queue.queue_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->rx_queue.queue_dma,
					adapter->rx_queue.queue_len,
					DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
	ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
	ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
}
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	u64 mac_address = 0;
	int rxq_entries;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;

	ibmveth_debug_printk("open starting\n");

	rxq_entries =
		adapter->rx_buff_pool[0].size +
		adapter->rx_buff_pool[1].size +
		adapter->rx_buff_pool[2].size + 1;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(adapter->buffer_list_dma)) ||
	    (dma_mapping_error(adapter->filter_list_dma)) ||
	    (dma_mapping_error(adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) ||
	    ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) ||
	    ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
	{
		ibmveth_error_printk("unable to allocate buffer pools\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.desc = 0;
	rxq_desc.fields.valid = 1;
	rxq_desc.fields.length = adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
					 adapter->buffer_list_dma,
					 rxq_desc.desc,
					 adapter->filter_list_dma,
					 mac_address);

	if (lpar_rc != H_Success) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%x filter TCE:0x%x rxq desc:0x%lx MAC:0x%lx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		ibmveth_cleanup(adapter);
		return -ENONET;
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if ((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_isLongBusy(rc) || (rc == H_Busy));

		ibmveth_cleanup(adapter);
		return rc;
	}

	netif_start_queue(netdev);

	ibmveth_debug_printk("scheduling initial replenish cycle\n");
	ibmveth_schedule_replenishing(adapter);

	ibmveth_debug_printk("open complete\n");

	return 0;
}
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	long lpar_rc;

	ibmveth_debug_printk("close starting\n");

	netif_stop_queue(netdev);

	free_irq(netdev->irq, netdev);

	cancel_delayed_work(&adapter->replenish_task);
	flush_scheduled_work();

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));

	if (lpar_rc != H_Success)
	{
		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
				     lpar_rc);
	}

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + 4096 - 8);

	ibmveth_cleanup(adapter);

	ibmveth_debug_printk("close complete\n");

	return 0;
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) {
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
}
static u32 netdev_get_link(struct net_device *dev) {
	/* the virtual link is always up */
	return 1;
}
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo	= netdev_get_drvinfo,
	.get_settings	= netdev_get_settings,
	.get_link	= netdev_get_link,
	.get_sg		= ethtool_op_get_sg,
	.get_tx_csum	= ethtool_op_get_tx_csum,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
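/*
 * page_offset() yields an address's offset within its page, assuming a
 * 4K page size (1 << 12); it is kept alongside the fragment-mapping
 * code in ibmveth_start_xmit() below.
 */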
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
	unsigned long lpar_rc;
	int nfrags = 0, curfrag;
	unsigned long correlator;
	unsigned int retry_count;

	if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
		adapter->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	memset(&desc, 0, sizeof(desc));

	/* nfrags = number of frags after the initial fragment */
	nfrags = skb_shinfo(skb)->nr_frags;

	if (nfrags)
		adapter->tx_multidesc_send++;

	/* map the initial fragment */
	desc[0].fields.length  = nfrags ? skb->len - skb->data_len : skb->len;
	desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
					desc[0].fields.length, DMA_TO_DEVICE);
	desc[0].fields.valid   = 1;

	if (dma_mapping_error(desc[0].fields.address)) {
		ibmveth_error_printk("tx: unable to map initial fragment\n");
		adapter->tx_map_failed++;
		adapter->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	curfrag = nfrags;

	/* map fragments past the initial portion if there are any */
	while (curfrag--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
		desc[curfrag+1].fields.address
			= dma_map_single(&adapter->vdev->dev,
				page_address(frag->page) + frag->page_offset,
				frag->size, DMA_TO_DEVICE);
		desc[curfrag+1].fields.length = frag->size;
		desc[curfrag+1].fields.valid  = 1;

		if (dma_mapping_error(desc[curfrag+1].fields.address)) {
			ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
			adapter->tx_map_failed++;
			adapter->stats.tx_dropped++;
			/* Free all the mappings we just created */
			while (curfrag < nfrags) {
				dma_unmap_single(&adapter->vdev->dev,
						 desc[curfrag+1].fields.address,
						 desc[curfrag+1].fields.length,
						 DMA_TO_DEVICE);
				curfrag++;
			}
			dev_kfree_skb(skb);
			return 0;
		}
	}

	/* send the frame. Arbitrarily set retrycount to 1024 */
	correlator = 0;
	retry_count = 1024;
	do {
		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
					     desc[0].desc,
					     desc[1].desc,
					     desc[2].desc,
					     desc[3].desc,
					     desc[4].desc,
					     desc[5].desc,
					     correlator);
	} while ((lpar_rc == H_Busy) && (retry_count--));

	if (lpar_rc != H_Success && lpar_rc != H_Dropped) {
		int i;
		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
		for (i = 0; i < 6; i++) {
			ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%x\n", i,
					     desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
		}
		adapter->tx_send_failed++;
		adapter->stats.tx_dropped++;
	} else {
		adapter->stats.tx_packets++;
		adapter->stats.tx_bytes += skb->len;
	}

	do {
		dma_unmap_single(&adapter->vdev->dev,
				desc[nfrags].fields.address,
				desc[nfrags].fields.length, DMA_TO_DEVICE);
	} while (--nfrags >= 0);

	dev_kfree_skb(skb);
	return 0;
}
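/*
 * Note the unmap loop in ibmveth_start_xmit() walks desc[nfrags] down
 * to desc[0], so the initial fragment mapping is released along with
 * the page fragments whether or not the send succeeded.  H_Dropped is
 * deliberately not treated as an error: the hypervisor consumed the
 * frame, it just had nowhere to deliver it.
 */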
static int ibmveth_poll(struct net_device *netdev, int *budget)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	int max_frames_to_process = netdev->quota;
	int frames_processed = 0;
	int more_work = 1;
	unsigned long lpar_rc;

 restart_poll:
	do {
		struct net_device *netdev = adapter->netdev;

		if (ibmveth_rxq_pending_buffer(adapter)) {
			struct sk_buff *skb;

			rmb();

			if (!ibmveth_rxq_buffer_valid(adapter)) {
				wmb(); /* suggested by larson1 */
				adapter->rx_invalid_buffer++;
				ibmveth_debug_printk("recycling invalid buffer\n");
				ibmveth_rxq_recycle_buffer(adapter);
			} else {
				int length = ibmveth_rxq_frame_length(adapter);
				int offset = ibmveth_rxq_frame_offset(adapter);
				skb = ibmveth_rxq_get_buffer(adapter);

				ibmveth_rxq_harvest_buffer(adapter);

				skb_reserve(skb, offset);
				skb_put(skb, length);
				skb->dev = netdev;
				skb->protocol = eth_type_trans(skb, netdev);

				netif_receive_skb(skb);	/* send it up */

				adapter->stats.rx_packets++;
				adapter->stats.rx_bytes += length;
				frames_processed++;
			}
		} else {
			more_work = 0;
		}
	} while (more_work && (frames_processed < max_frames_to_process));

	ibmveth_schedule_replenishing(adapter);

	if (more_work) {
		/* more work to do - return that we are not done yet */
		netdev->quota -= frames_processed;
		*budget -= frames_processed;
		return 1;
	}

	/* we think we are done - reenable interrupts, then check once more to make sure we are done */
	lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);

	ibmveth_assert(lpar_rc == H_Success);

	netif_rx_complete(netdev);

	if (ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
	{
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_Success);
		more_work = 1;
		goto restart_poll;
	}

	netdev->quota -= frames_processed;
	*budget -= frames_processed;

	/* we really are done */
	return 0;
}
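/*
 * The enable-then-recheck sequence at the end of ibmveth_poll() closes
 * the race where a frame arrives between the final emptiness check and
 * re-enabling the vio interrupt: if the recheck finds a pending buffer
 * and the poll can be rescheduled, the interrupt is disabled again and
 * control jumps back to restart_poll instead of completing.
 */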
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if (netif_rx_schedule_prep(netdev)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_Success);
		__netif_rx_schedule(netdev);
	}
	return IRQ_HANDLED;
}
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return &adapter->stats;
}
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
		}
	} else {
		struct dev_mc_list *mclist = netdev->mc_list;
		int i;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
		}
		/* add the addresses to the filter table */
		for (i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_Success) {
				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
		}
	}
}
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > (1 << 20)))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;

	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
					dev->unit_address);

	mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
	if (!mac_addr_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
				"attribute\n", __FILE__, __LINE__);
		return 0;
	}

	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
	if (!mcastFilterSize_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
				"VETH_MCAST_FILTER_SIZE attribute\n",
				__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	SET_MODULE_OWNER(netdev);

	adapter = netdev->priv;
	memset(adapter, 0, sizeof(struct ibmveth_adapter));
	dev->dev.driver_data = netdev;

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;

	/*	Some older boxes running PHYP non-natively have an OF that
		returns an 8-byte local-mac-address field (and the first
		2 bytes have to be ignored) while newer boxes' OF return
		a 6-byte field.  Note that IEEE 1275 specifies that
		local-mac-address must be a 6-byte field.
		The RPA doc specifies that the first byte must be 10b, so
		we'll just look for it to solve this 8 vs. 6 byte field issue */

	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	adapter->liobn = dev->iommu_table->it_index;

	netdev->irq = dev->irq;
	netdev->open               = ibmveth_open;
	netdev->poll               = ibmveth_poll;
	netdev->weight             = 16;
	netdev->stop               = ibmveth_close;
	netdev->hard_start_xmit    = ibmveth_start_xmit;
	netdev->get_stats          = ibmveth_get_stats;
	netdev->set_multicast_list = ibmveth_set_multicast_list;
	netdev->do_ioctl           = ibmveth_ioctl;
	netdev->ethtool_ops        = &netdev_ethtool_ops;
	netdev->change_mtu         = ibmveth_change_mtu;
	SET_NETDEV_DEV(netdev, &dev->dev);

	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize);
	ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize);
	ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize);

	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

	INIT_WORK(&adapter->replenish_task, (void *)ibmveth_replenish_task, (void *)adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	atomic_set(&adapter->not_replenishing, 1);

	ibmveth_debug_printk("registering netdev...\n");

	rc = register_netdev(netdev);

	if (rc) {
		ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	ibmveth_debug_printk("registered\n");

	ibmveth_proc_register_adapter(adapter);

	return 0;
}
static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev->dev.driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;

	unregister_netdev(netdev);

	ibmveth_proc_unregister_adapter(adapter);

	free_netdev(netdev);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
	ibmveth_proc_dir = create_proc_entry(IBMVETH_PROC_DIR, S_IFDIR, proc_net);
	if (ibmveth_proc_dir) {
		SET_MODULE_OWNER(ibmveth_proc_dir);
	}
}

static void ibmveth_proc_unregister_driver(void)
{
	remove_proc_entry(IBMVETH_PROC_DIR, proc_net);
}
static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		return (void *)1;
	} else {
		return NULL;
	}
}

static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}
static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
	struct ibmveth_adapter *adapter = seq->private;
	char *current_mac = ((char *) &adapter->netdev->dev_addr);
	char *firmware_mac = ((char *) &adapter->mac_addr);

	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

	seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
	seq_printf(seq, "LIOBN:           0x%lx\n", adapter->liobn);
	seq_printf(seq, "Current MAC:     %02X:%02X:%02X:%02X:%02X:%02X\n",
		   current_mac[0], current_mac[1], current_mac[2],
		   current_mac[3], current_mac[4], current_mac[5]);
	seq_printf(seq, "Firmware MAC:    %02X:%02X:%02X:%02X:%02X:%02X\n",
		   firmware_mac[0], firmware_mac[1], firmware_mac[2],
		   firmware_mac[3], firmware_mac[4], firmware_mac[5]);

	seq_printf(seq, "\nAdapter Statistics:\n");
	seq_printf(seq, "  TX:  skbuffs linearized:          %ld\n", adapter->tx_linearized);
	seq_printf(seq, "       multi-descriptor sends:      %ld\n", adapter->tx_multidesc_send);
	seq_printf(seq, "       skb_linearize failures:      %ld\n", adapter->tx_linearize_failed);
	seq_printf(seq, "       vio_map_single failures:     %ld\n", adapter->tx_map_failed);
	seq_printf(seq, "       send failures:               %ld\n", adapter->tx_send_failed);
	seq_printf(seq, "  RX:  replenish task cycles:       %ld\n", adapter->replenish_task_cycles);
	seq_printf(seq, "       alloc_skb_failures:          %ld\n", adapter->replenish_no_mem);
	seq_printf(seq, "       add buffer failures:         %ld\n", adapter->replenish_add_buff_failure);
	seq_printf(seq, "       invalid buffers:             %ld\n", adapter->rx_invalid_buffer);
	seq_printf(seq, "       no buffers:                  %ld\n", adapter->rx_no_buffer);

	return 0;
}
static struct seq_operations ibmveth_seq_ops = {
	.start = ibmveth_seq_start,
	.next  = ibmveth_seq_next,
	.stop  = ibmveth_seq_stop,
	.show  = ibmveth_seq_show,
};
static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct proc_dir_entry *proc;
	int rc;

	rc = seq_open(file, &ibmveth_seq_ops);
	if (!rc) {
		/* recover the pointer buried in proc_dir_entry data */
		seq = file->private_data;
		proc = PDE(inode);
		seq->private = proc->data;
	}
	return rc;
}
static struct file_operations ibmveth_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ibmveth_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
	struct proc_dir_entry *entry;
	if (ibmveth_proc_dir) {
		entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
		if (!entry) {
			ibmveth_error_printk("Cannot create adapter proc entry");
		} else {
			entry->data = (void *) adapter;
			entry->proc_fops = &ibmveth_proc_fops;
			SET_MODULE_OWNER(entry);
		}
	}
}
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
	if (ibmveth_proc_dir) {
		remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
	}
}
#else /* CONFIG_PROC_FS */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_register_driver(void)
{
}

static void ibmveth_proc_unregister_driver(void)
{
}
#endif /* CONFIG_PROC_FS */
static struct vio_device_id ibmveth_device_table[] __devinitdata = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};

MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
static struct vio_driver ibmveth_driver = {
	.name     = (char *)ibmveth_driver_name,
	.id_table = ibmveth_device_table,
	.probe    = ibmveth_probe,
	.remove   = ibmveth_remove
};
static int __init ibmveth_module_init(void)
{
	ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);

	ibmveth_proc_register_driver();

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
	ibmveth_proc_unregister_driver();
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);