drivers/net/ibmveth.c

/**************************************************************************/
/*                                                                        */
/* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
/* Copyright (C) 2003 IBM Corp.                                           */
/* Originally written by Dave Larson (larson1@us.ibm.com)                 */
/* Maintained by Santiago Leon (santil@us.ibm.com)                        */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify   */
/* it under the terms of the GNU General Public License as published by   */
/* the Free Software Foundation; either version 2 of the License, or      */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,        */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
/* GNU General Public License for more details.                           */
/*                                                                        */
/* You should have received a copy of the GNU General Public License      */
/* along with this program; if not, write to the Free Software            */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307    */
/* USA                                                                    */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN     */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/
/*
  TODO:
  - add support for sysfs
  - possibly remove procfs support
*/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/net_namespace.h>
#include <asm/semaphore.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/vio.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "ibmveth.h"

#undef DEBUG

#define ibmveth_printk(fmt, args...) \
	printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)

#define ibmveth_error_printk(fmt, args...) \
	printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)

#ifdef DEBUG
#define ibmveth_debug_printk_no_adapter(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
#define ibmveth_debug_printk(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#define ibmveth_assert(expr) \
	if(!(expr)) {                                   \
		printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
		BUG(); \
	}
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
#define ibmveth_assert(expr)
#endif

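/*
 * Note (added for readability): ibmveth_error_printk(), ibmveth_debug_printk()
 * and ibmveth_assert() expand to code that references a local variable named
 * "adapter", so they can only be used inside functions that have a
 * struct ibmveth_adapter *adapter in scope.
 */
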
static int ibmveth_open(struct net_device *dev);
static int ibmveth_close(struct net_device *dev);
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ibmveth_poll(struct napi_struct *napi, int budget);
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
static void ibmveth_set_multicast_list(struct net_device *dev);
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
static void ibmveth_proc_register_driver(void);
static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static struct kobj_type ktype_veth_pool;

#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle);
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID);
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK);
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD);
}

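/*
 * A note on the rx queue protocol (inferred from the accessors above and
 * from ibmveth_rxq_recycle_buffer()/ibmveth_rxq_harvest_buffer() below):
 * instead of shared head/tail pointers, each entry the hypervisor writes
 * carries a toggle bit that flips on every pass over the ring.  The driver
 * keeps its own expected toggle in rx_queue.toggle and flips it whenever
 * rx_queue.index wraps, so an entry is "pending" exactly when the entry's
 * toggle matches the driver's.
 */
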
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size / 2;
	pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if(!pool->free_map) {
		return -1;
	}

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if(!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);

	if(!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->skbuff, 0, sizeof(void*) * pool->size);
	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for(i = 0; i < pool->size; ++i) {
		pool->free_map[i] = i;
	}

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

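/*
 * Summary (added for readability; it restates what the code does): free_map
 * is a ring of free buffer slots.  consumer_index is where the replenish
 * path takes the next free slot from, producer_index is where slots freed
 * by ibmveth_remove_buffer_from_pool() are returned, and "available" counts
 * the buffers currently posted to the hypervisor.
 */
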
/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;

	mb();

	for(i = 0; i < count; ++i) {
		struct sk_buff *skb;
		unsigned int free_index, index;
		u64 correlator;
		union ibmveth_buf_desc desc;
		unsigned long lpar_rc;
		dma_addr_t dma_addr;

		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);

		if(!skb) {
			ibmveth_debug_printk("replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index = (pool->consumer_index + 1) % pool->size;
		index = pool->free_map[free_index];

		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
		ibmveth_assert(pool->skbuff[index] == NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64*)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

		if(lpar_rc != H_SUCCESS) {
			pool->free_map[free_index] = index;
			pool->skbuff[index] = NULL;
			if (pool->consumer_index == 0)
				pool->consumer_index = pool->size - 1;
			else
				pool->consumer_index--;
			dma_unmap_single(&adapter->vdev->dev,
					pool->dma_addr[index], pool->buff_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			adapter->replenish_add_buff_failure++;
			break;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
}

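/*
 * Layout of the 64-bit correlator stamped into each posted buffer above
 * (and echoed back by the hypervisor in the rx queue entry):
 *
 *	bits 63..32: buffer pool index (pool->index)
 *	bits 31..0:  buffer index within that pool
 *
 * so pool = correlator >> 32 and index = correlator & 0xffffffff, exactly
 * as decoded in ibmveth_remove_buffer_from_pool() below.
 */
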
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for(i = 0; i < IbmVethNumBufferPools; i++)
		if(adapter->rx_buff_pool[i].active)
			ibmveth_replenish_buffer_pool(adapter,
						      &adapter->rx_buff_pool[i]);

	/* the "no buffer" count lives in the last 8 bytes of the buffer
	 * list page registered with the hypervisor */
	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if(pool->skbuff && pool->dma_addr) {
		for(i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if(skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if(pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if(pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	ibmveth_assert(skb != NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index
		= (adapter->rx_buff_pool[pool].producer_index + 1)
		% adapter->rx_buff_pool[pool].size;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	if(!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;

	if(adapter->buffer_list_addr != NULL) {
		if(!dma_mapping_error(adapter->buffer_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if(adapter->filter_list_addr != NULL) {
		if(!dma_mapping_error(adapter->filter_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if(adapter->rx_queue.queue_addr != NULL) {
		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->rx_queue.queue_dma,
					adapter->rx_queue.queue_len,
					DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for(i = 0; i<IbmVethNumBufferPools; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/* After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once. */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;

	ibmveth_debug_printk("open starting\n");

	napi_enable(&adapter->napi);

	for(i = 0; i<IbmVethNumBufferPools; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if(!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if((dma_mapping_error(adapter->buffer_list_dma)) ||
	   (dma_mapping_error(adapter->filter_list_dma)) ||
	   (dma_mapping_error(adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	/* the 6-byte MAC lands in the top bytes of the (big-endian) u64;
	 * shift it down into the low 48 bits for the hcall */
	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENONET;
	}

	for(i = 0; i<IbmVethNumBufferPools; i++) {
		if(!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			ibmveth_error_printk("unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			napi_disable(&adapter->napi);
			return -ENOMEM;
		}
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return rc;
	}

	ibmveth_debug_printk("initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	ibmveth_debug_printk("open complete\n");

	return 0;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	long lpar_rc;

	ibmveth_debug_printk("close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	free_irq(netdev->irq, netdev);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if(lpar_rc != H_SUCCESS)
	{
		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
				     lpar_rc);
	}

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);

	ibmveth_cleanup(adapter);

	ibmveth_debug_printk("close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
}

static u32 netdev_get_link(struct net_device *dev) {
	return 1;
}

static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if (data)
		adapter->rx_csum = 1;
	else {
		/*
		 * Since the ibmveth firmware interface does not have the concept of
		 * separate tx/rx checksum offload enable, if rx checksum is disabled
		 * we also have to disable tx checksum offload. Once we disable rx
		 * checksum offload, we are no longer allowed to send tx buffers that
		 * are not properly checksummed.
		 */
		adapter->rx_csum = 0;
		dev->features &= ~NETIF_F_IP_CSUM;
	}
}

static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if (data) {
		dev->features |= NETIF_F_IP_CSUM;
		adapter->rx_csum = 1;
	} else
		dev->features &= ~NETIF_F_IP_CSUM;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
				    void (*done) (struct net_device *, u32))
{
	struct ibmveth_adapter *adapter = dev->priv;
	u64 set_attr, clr_attr, ret_attr;
	long ret;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
	else
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					 set_attr, &ret_attr);

		if (ret != H_SUCCESS) {
			rc1 = -EIO;
			ibmveth_error_printk("unable to change checksum offload settings."
					     " %d rc=%ld\n", data, ret);

			/* undo the attempted change by swapping set/clr */
			ret = h_illan_attributes(adapter->vdev->unit_address,
						 set_attr, clr_attr, &ret_attr);
		} else
			done(dev, data);
	} else {
		rc1 = -EIO;
		ibmveth_error_printk("unable to change checksum offload settings."
				     " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
		return 0;

	return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);
}

static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int rc = 0;

	if (data && (dev->features & NETIF_F_IP_CSUM))
		return 0;
	if (!data && !(dev->features & NETIF_F_IP_CSUM))
		return 0;

	if (data && !adapter->rx_csum)
		rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
	else
		ibmveth_set_tx_csum_flags(dev, data);

	return rc;
}

static u32 ibmveth_get_rx_csum(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return adapter->rx_csum;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(ibmveth_stats);
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = dev->priv;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= netdev_get_link,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ibmveth_set_tx_csum,
	.get_rx_csum		= ibmveth_get_rx_csum,
	.set_rx_csum		= ibmveth_set_rx_csum,
	.get_tso		= ethtool_op_get_tso,
	.get_ufo		= ethtool_op_get_ufo,
	.get_strings		= ibmveth_get_strings,
	.get_stats_count	= ibmveth_get_stats_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	unsigned long correlator;
	unsigned long flags;
	unsigned int retry_count;
	unsigned int tx_dropped = 0;
	unsigned int tx_bytes = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;

	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
	desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
					     skb->len, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
		ibmveth_error_printk("tx: failed to checksum packet\n");
		tx_dropped++;
		goto out;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;

		desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

	if (dma_mapping_error(desc.fields.address)) {
		ibmveth_error_printk("tx: unable to map xmit buffer\n");
		tx_map_failed++;
		tx_dropped++;
		goto out;
	}

	/* send the frame. Arbitrarily set retrycount to 1024 */
	correlator = 0;
	retry_count = 1024;
	do {
		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
					     desc.desc, 0, 0, 0, 0, 0,
					     correlator, &correlator);
	} while ((lpar_rc == H_BUSY) && (retry_count--));

	if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
		ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n",
				     (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0,
				     skb->len, desc.fields.address);
		tx_send_failed++;
		tx_dropped++;
	} else {
		tx_packets++;
		tx_bytes += skb->len;
		netdev->trans_start = jiffies;
	}

	dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
			 skb->len, DMA_TO_DEVICE);

out:	spin_lock_irqsave(&adapter->stats_lock, flags);
	adapter->stats.tx_dropped += tx_dropped;
	adapter->stats.tx_bytes += tx_bytes;
	adapter->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	spin_unlock_irqrestore(&adapter->stats_lock, flags);

	dev_kfree_skb(skb);
	return 0;
}

static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		struct sk_buff *skb;

		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);
			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			ibmveth_rxq_harvest_buffer(adapter);

			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			netif_receive_skb(skb);	/* send it up */

			adapter->stats.rx_packets++;
			adapter->stats.rx_bytes += length;
			frames_processed++;
			netdev->last_rx = jiffies;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		ibmveth_assert(lpar_rc == H_SUCCESS);

		netif_rx_complete(netdev, napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    netif_rx_reschedule(netdev, napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

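/*
 * Note on the re-check at the end of ibmveth_poll() above: a frame that
 * arrives between the last ibmveth_rxq_pending_buffer() check and the
 * VIO_IRQ_ENABLE hcall would not raise a fresh interrupt, so the driver
 * re-inspects the ring after re-enabling and, if work appeared, disables
 * interrupts again and jumps back to restart_poll instead of waiting for
 * an event that may never come.
 */
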
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		__netif_rx_schedule(netdev, &adapter->napi);
	}
	return IRQ_HANDLED;
}

static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return &adapter->stats;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
		}
	} else {
		struct dev_mc_list *mclist = netdev->mc_list;
		int i;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
		}
		/* add the addresses to the filter table */
		for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
			// add the multicast address to the filter table
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if(lpar_rc != H_SUCCESS) {
				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
		}
	}
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int reinit = 0;
	int i, rc;

	if (new_mtu < IBMVETH_MAX_MTU)
		return -EINVAL;

	for (i = 0; i < IbmVethNumBufferPools; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IbmVethNumBufferPools)
		return -EINVAL;

	/* Look for an active buffer pool that can hold the new MTU */
	for(i = 0; i<IbmVethNumBufferPools; i++) {
		if (!adapter->rx_buff_pool[i].active) {
			adapter->rx_buff_pool[i].active = 1;
			reinit = 1;
		}

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			if (reinit && netif_running(adapter->netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(adapter->netdev);
				adapter->pool_config = 0;
				dev->mtu = new_mtu;
				if ((rc = ibmveth_open(adapter->netdev)))
					return rc;
			} else
				dev->mtu = new_mtu;
			return 0;
		}
	}
	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(dev->priv);
	ibmveth_interrupt(dev->irq, dev);
}
#endif

static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i;
	long ret;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	u64 set_attr, ret_attr;

	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
					dev->unit_address);

	mac_addr_p = (unsigned char *) vio_get_attribute(dev,
						VETH_MAC_ADDR, NULL);
	if(!mac_addr_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
				"attribute\n", __FILE__, __LINE__);
		return 0;
	}

	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if(!mcastFilterSize_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
				"VETH_MCAST_FILTER_SIZE attribute\n",
				__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if(!netdev)
		return -ENOMEM;

	SET_MODULE_OWNER(netdev);

	adapter = netdev->priv;
	dev->dev.driver_data = netdev;

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize= *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	/* Some older boxes running PHYP non-natively have an OF that
	   returns a 8-byte local-mac-address field (and the first
	   2 bytes have to be ignored) while newer boxes' OF return
	   a 6-byte field. Note that IEEE 1275 specifies that
	   local-mac-address must be a 6-byte field.
	   The RPA doc specifies that the first byte must be 10b, so
	   we'll just look for it to solve this 8 vs. 6 byte field issue */

	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->open               = ibmveth_open;
	netdev->stop               = ibmveth_close;
	netdev->hard_start_xmit    = ibmveth_start_xmit;
	netdev->get_stats          = ibmveth_get_stats;
	netdev->set_multicast_list = ibmveth_set_multicast_list;
	netdev->do_ioctl           = ibmveth_ioctl;
	netdev->ethtool_ops        = &netdev_ethtool_ops;
	netdev->change_mtu         = ibmveth_change_mtu;
	SET_NETDEV_DEV(netdev, &dev->dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ibmveth_poll_controller;
#endif
	netdev->features |= NETIF_F_LLTX;
	spin_lock_init(&adapter->stats_lock);

	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for(i = 0; i<IbmVethNumBufferPools; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		kobj->parent = &dev->dev.kobj;
		sprintf(kobj->name, "pool%d", i);
		kobj->ktype = &ktype_veth_pool;
		kobject_register(kobj);
	}

	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	ibmveth_debug_printk("registering netdev...\n");

	ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;

		ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);

		if (ret == H_SUCCESS) {
			adapter->rx_csum = 1;
			netdev->features |= NETIF_F_IP_CSUM;
		} else
			ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
	}

	rc = register_netdev(netdev);

	if(rc) {
		ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	ibmveth_debug_printk("registered\n");

	ibmveth_proc_register_adapter(adapter);

	return 0;
}

static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev->dev.driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;
	int i;

	for(i = 0; i<IbmVethNumBufferPools; i++)
		kobject_unregister(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	ibmveth_proc_unregister_adapter(adapter);

	free_netdev(netdev);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
	ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net);
	if (ibmveth_proc_dir) {
		SET_MODULE_OWNER(ibmveth_proc_dir);
	}
}

static void ibmveth_proc_unregister_driver(void)
{
	remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
}

static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		return (void *)1;
	} else {
		return NULL;
	}
}

static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}

static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
	struct ibmveth_adapter *adapter = seq->private;
	char *current_mac = ((char*) &adapter->netdev->dev_addr);
	char *firmware_mac = ((char*) &adapter->mac_addr) ;

	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

	seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
	seq_printf(seq, "Current MAC:     %02X:%02X:%02X:%02X:%02X:%02X\n",
		   current_mac[0], current_mac[1], current_mac[2],
		   current_mac[3], current_mac[4], current_mac[5]);
	seq_printf(seq, "Firmware MAC:    %02X:%02X:%02X:%02X:%02X:%02X\n",
		   firmware_mac[0], firmware_mac[1], firmware_mac[2],
		   firmware_mac[3], firmware_mac[4], firmware_mac[5]);

	seq_printf(seq, "\nAdapter Statistics:\n");
	seq_printf(seq, "  TX:  vio_map_single failures:      %ld\n", adapter->tx_map_failed);
	seq_printf(seq, "       send failures:                %ld\n", adapter->tx_send_failed);
	seq_printf(seq, "  RX:  replenish task cycles:        %ld\n", adapter->replenish_task_cycles);
	seq_printf(seq, "       alloc_skb_failures:           %ld\n", adapter->replenish_no_mem);
	seq_printf(seq, "       add buffer failures:          %ld\n", adapter->replenish_add_buff_failure);
	seq_printf(seq, "       invalid buffers:              %ld\n", adapter->rx_invalid_buffer);
	seq_printf(seq, "       no buffers:                   %ld\n", adapter->rx_no_buffer);

	return 0;
}

static struct seq_operations ibmveth_seq_ops = {
	.start = ibmveth_seq_start,
	.next  = ibmveth_seq_next,
	.stop  = ibmveth_seq_stop,
	.show  = ibmveth_seq_show,
};

static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct proc_dir_entry *proc;
	int rc;

	rc = seq_open(file, &ibmveth_seq_ops);
	if (!rc) {
		/* recover the pointer buried in proc_dir_entry data */
		seq = file->private_data;
		proc = PDE(inode);
		seq->private = proc->data;
	}
	return rc;
}

static const struct file_operations ibmveth_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ibmveth_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
	struct proc_dir_entry *entry;
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
		if (!entry) {
			ibmveth_error_printk("Cannot create adapter proc entry");
		} else {
			entry->data = (void *) adapter;
			entry->proc_fops = &ibmveth_proc_fops;
			SET_MODULE_OWNER(entry);
		}
	}
	return;
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		remove_proc_entry(u_addr, ibmveth_proc_dir);
	}
}

#else /* CONFIG_PROC_FS */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_register_driver(void)
{
}

static void ibmveth_proc_unregister_driver(void)
{
}
#endif /* CONFIG_PROC_FS */

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject * kobj,
			      struct attribute * attr, char * buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
			       const char * buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev =
	    container_of(kobj->parent, struct device, kobj)->driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if(ibmveth_alloc_buffer_pool(pool)) {
					ibmveth_error_printk("unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->active = 1;
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IbmVethNumBufferPools; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IbmVethNumBufferPools) {
				ibmveth_error_printk("no active pool >= MTU\n");
				return -EPERM;
			}

			pool->active = 0;
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
			return -EINVAL;
		else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->size = value;
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
			return -EINVAL;
		else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->buff_size = value;
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}

#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute * veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};

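/*
 * Usage sketch (illustrative; the exact sysfs path and unit address vary by
 * system): each buffer pool is registered in ibmveth_probe() as a kobject
 * named "poolN" under the vio device, so a pool can be toggled or resized
 * at runtime with something like
 *
 *	echo 0   > /sys/devices/vio/30000002/pool0/active
 *	echo 512 > /sys/devices/vio/30000002/pool1/num
 *
 * veth_pool_store() then closes and reopens the device to apply the change.
 */
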
static struct vio_device_id ibmveth_device_table[] __devinitdata= {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.driver		= {
		.name	= ibmveth_driver_name,
		.owner	= THIS_MODULE,
	}
};

static int __init ibmveth_module_init(void)
{
	ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);

	ibmveth_proc_register_driver();

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
	ibmveth_proc_unregister_driver();
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);