Staging: benet: fix problems reported by checkpatch
drivers/staging/benet/be_init.c
/*
 * Copyright (C) 2005 - 2008 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include <linux/etherdevice.h>
#include "benet.h"

#define DRVR_VERSION "1.0.728"
static const struct pci_device_id be_device_id_table[] = {
	{PCI_DEVICE(0x19a2, 0x0201)},
	{0}
};

MODULE_DEVICE_TABLE(pci, be_device_id_table);

MODULE_VERSION(DRVR_VERSION);

#define DRV_DESCRIPTION "ServerEngines BladeEngine Network Driver Version "

MODULE_DESCRIPTION(DRV_DESCRIPTION DRVR_VERSION);
MODULE_AUTHOR("ServerEngines");
MODULE_LICENSE("GPL");
static unsigned int msix = 1;
module_param(msix, uint, S_IRUGO);
MODULE_PARM_DESC(msix, "Use MSI-x interrupts");

static unsigned int rxbuf_size = 2048;	/* Default RX frag size */
module_param(rxbuf_size, uint, S_IRUGO);
MODULE_PARM_DESC(rxbuf_size, "Size of buffers to hold Rx data");

const char be_drvr_ver[] = DRVR_VERSION;
char be_fw_ver[32];		/* F/W version filled in by be_probe */
char be_driver_name[] = "benet";
/*
 * Number of entries in each queue.
 */
#define EVENT_Q_LEN		1024
#define ETH_TXQ_LEN		2048
#define ETH_TXCQ_LEN		1024
#define ETH_RXQ_LEN		1024	/* Does not support any other value */
#define ETH_UC_RXCQ_LEN		1024
#define ETH_BC_RXCQ_LEN		256
#define MCC_Q_LEN		64	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

/* Bit mask describing events of interest to be traced */
unsigned int trace_level;
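
/*
 * Map the three PCI BARs used by BladeEngine: BAR 2 (CSR), BAR 4
 * (doorbells, 128K) and BAR 1 (PCI config space), tearing down any
 * earlier mapping on failure.
 */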
static int
init_pci_be_function(struct be_adapter *adapter, struct pci_dev *pdev)
{
	u64 pa;

	/* CSR */
	pa = pci_resource_start(pdev, 2);
	adapter->csr_va = ioremap_nocache(pa, pci_resource_len(pdev, 2));
	if (adapter->csr_va == NULL)
		return -ENOMEM;

	/* Door Bell */
	pa = pci_resource_start(pdev, 4);
	adapter->db_va = ioremap_nocache(pa, (128 * 1024));
	if (adapter->db_va == NULL) {
		iounmap(adapter->csr_va);
		return -ENOMEM;
	}

	/* PCI */
	pa = pci_resource_start(pdev, 1);
	adapter->pci_va = ioremap_nocache(pa, pci_resource_len(pdev, 1));
	if (adapter->pci_va == NULL) {
		iounmap(adapter->csr_va);
		iounmap(adapter->db_va);
		return -ENOMEM;
	}
	return 0;
}
/*
   This function enables the interrupt corresponding to the Event
   queue ID for the given NetObject
*/
void be_enable_eq_intr(struct be_net_object *pnob)
{
	struct CQ_DB_AMAP cqdb;

	cqdb.dw[0] = 0;
	AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1);
	AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 1);
	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0);
	AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id);
	PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
}
/*
   This function disables the interrupt corresponding to the Event
   queue ID for the given NetObject
*/
void be_disable_eq_intr(struct be_net_object *pnob)
{
	struct CQ_DB_AMAP cqdb;

	cqdb.dw[0] = 0;
	AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1);
	AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 0);
	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0);
	AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id);
	PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
}
/*
   This function enables the interrupt from the network function
   of the BladeEngine. Use the function be_enable_eq_intr()
   to enable the interrupt from the event queue of only one specific
   NetObject
*/
void be_enable_intr(struct be_net_object *pnob)
{
	struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
	u32 host_intr;

	ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl);
	host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
							hostintr, ctrl.dw);
	if (!host_intr) {
		AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
			hostintr, ctrl.dw, 1);
		PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl,
			ctrl.dw[0]);
	}
}
/*
   This function disables the interrupt from the network function of
   the BladeEngine. Use the function be_disable_eq_intr() to
   disable the interrupt from the event queue of only one specific NetObject
*/
void be_disable_intr(struct be_net_object *pnob)
{
	struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
	u32 host_intr;

	ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl);
	host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
							hostintr, ctrl.dw);
	if (host_intr) {
		AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR, hostintr,
			ctrl.dw, 0);
		PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl,
			ctrl.dw[0]);
	}
}
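
/*
 * Try to switch the adapter to MSI-X: request all supported vectors
 * and remember whether the allocation succeeded. Returns non-zero if
 * MSI-X was disabled via the module parameter or is unavailable.
 */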
static int be_enable_msix(struct be_adapter *adapter)
{
	int i, ret;

	if (!msix)
		return -1;

	for (i = 0; i < BE_MAX_REQ_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	ret = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_MAX_REQ_MSIX_VECTORS);

	if (ret == 0)
		adapter->msix_enabled = 1;
	return ret;
}
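
/*
 * Hook up the interrupt handler: prefer MSI-X, and fall back to
 * legacy INTx if MSI-X setup or its request_irq() fails.
 */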
static int be_register_isr(struct be_adapter *adapter,
			   struct be_net_object *pnob)
{
	struct net_device *netdev = pnob->netdev;
	int intx = 0, r;

	netdev->irq = adapter->pdev->irq;
	r = be_enable_msix(adapter);

	if (r == 0) {
		r = request_irq(adapter->msix_entries[0].vector,
				be_int, IRQF_SHARED, netdev->name, netdev);
		if (r) {
			printk(KERN_WARNING
				"MSIX Request IRQ failed - Errno %d\n", r);
			intx = 1;
			pci_disable_msix(adapter->pdev);
			adapter->msix_enabled = 0;
		}
	} else {
		intx = 1;
	}

	if (intx) {
		r = request_irq(netdev->irq, be_int, IRQF_SHARED,
				netdev->name, netdev);
		if (r) {
			printk(KERN_WARNING
				"INTx Request IRQ failed - Errno %d\n", r);
			return -1;
		}
	}
	adapter->isr_registered = 1;
	return 0;
}
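
/*
 * Release the IRQ acquired in be_register_isr() and, if MSI-X was in
 * use, disable it again.
 */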
static void be_unregister_isr(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdevp;

	if (adapter->isr_registered) {
		if (adapter->msix_enabled) {
			free_irq(adapter->msix_entries[0].vector, netdev);
			pci_disable_msix(adapter->pdev);
			adapter->msix_enabled = 0;
		} else {
			free_irq(netdev->irq, netdev);
		}
		adapter->isr_registered = 0;
	}
}
/*
   This function processes the Flush Completions that are issued by the
   ARM F/W, when a Recv Ring is destroyed. A flush completion is
   identified when a Rx Compl descriptor has the tcpcksum and udpcksum
   set and the pktsize is 32. These completions are received on the
   Rx Completion Queue.
*/
static u32 be_process_rx_flush_cmpl(struct be_net_object *pnob)
{
	struct ETH_RX_COMPL_AMAP *rxcp;
	unsigned int i = 0;

	while ((rxcp = be_get_rx_cmpl(pnob)) != NULL) {
		be_notify_cmpl(pnob, 1, pnob->rx_cq_id, 1);
		i++;
	}
	return i;
}
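
/* Reap TX completions until every posted WRB has been accounted for. */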
static void be_tx_q_clean(struct be_net_object *pnob)
{
	while (atomic_read(&pnob->tx_q_used))
		process_one_tx_compl(pnob, tx_compl_lastwrb_idx_get(pnob));
}
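
/*
 * Unmap and free every page still posted on the RX ring. When a page
 * is shared between two RX frags, only the frag with a non-zero page
 * offset owns the DMA mapping.
 */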
static void be_rx_q_clean(struct be_net_object *pnob)
{
	if (pnob->rx_ctxt) {
		int i;
		struct be_rx_page_info *rx_page_info;

		for (i = 0; i < pnob->rx_q_len; i++) {
			rx_page_info = &(pnob->rx_page_info[i]);
			if (!pnob->rx_pg_shared || rx_page_info->page_offset) {
				pci_unmap_page(pnob->adapter->pdev,
					pci_unmap_addr(rx_page_info, bus),
					pnob->rx_buf_size,
					PCI_DMA_FROMDEVICE);
			}
			if (rx_page_info->page)
				put_page(rx_page_info->page);
			memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
		}
		pnob->rx_pg_info_hd = 0;
	}
}
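
/*
 * Tear down all rings owned by this NetObject (TX/RX, their completion
 * queues, the MCC rings and the event queue) and clean up the function
 * object used to talk to the firmware.
 */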
static void be_destroy_netobj(struct be_net_object *pnob)
{
	int status;

	if (pnob->tx_q_created) {
		status = be_eth_sq_destroy(&pnob->tx_q_obj);
		pnob->tx_q_created = 0;
	}

	if (pnob->rx_q_created) {
		status = be_eth_rq_destroy(&pnob->rx_q_obj);
		if (status != 0) {
			status = be_eth_rq_destroy_options(&pnob->rx_q_obj, 0,
							   NULL, NULL);
			BUG_ON(status);
		}
		pnob->rx_q_created = 0;
	}

	be_process_rx_flush_cmpl(pnob);

	if (pnob->tx_cq_created) {
		status = be_cq_destroy(&pnob->tx_cq_obj);
		pnob->tx_cq_created = 0;
	}

	if (pnob->rx_cq_created) {
		status = be_cq_destroy(&pnob->rx_cq_obj);
		pnob->rx_cq_created = 0;
	}

	if (pnob->mcc_q_created) {
		status = be_mcc_ring_destroy(&pnob->mcc_q_obj);
		pnob->mcc_q_created = 0;
	}
	if (pnob->mcc_cq_created) {
		status = be_cq_destroy(&pnob->mcc_cq_obj);
		pnob->mcc_cq_created = 0;
	}

	if (pnob->event_q_created) {
		status = be_eq_destroy(&pnob->event_q_obj);
		pnob->event_q_created = 0;
	}
	be_function_cleanup(&pnob->fn_obj);
}
/*
 * free all resources associated with a pnob
 * Called at the time of module cleanup as well as on any error during
 * module init. Some resources may be partially allocated in a NetObj.
 */
static void netobject_cleanup(struct be_adapter *adapter,
			      struct be_net_object *pnob)
{
	struct net_device *netdev = adapter->netdevp;

	if (netif_running(netdev)) {
		netif_stop_queue(netdev);
		be_wait_nic_tx_cmplx_cmpl(pnob);
		be_disable_eq_intr(pnob);
	}

	be_unregister_isr(adapter);

	if (adapter->tasklet_started) {
		tasklet_kill(&(adapter->sts_handler));
		adapter->tasklet_started = 0;
	}
	if (pnob->fn_obj_created)
		be_disable_intr(pnob);

	if (adapter->dev_state != BE_DEV_STATE_NONE)
		unregister_netdev(netdev);

	if (pnob->fn_obj_created)
		be_destroy_netobj(pnob);

	adapter->net_obj = NULL;
	adapter->netdevp = NULL;

	be_rx_q_clean(pnob);
	if (pnob->rx_ctxt) {
		kfree(pnob->rx_page_info);
		kfree(pnob->rx_ctxt);
	}

	be_tx_q_clean(pnob);
	kfree(pnob->tx_ctxt);

	if (pnob->mcc_q)
		pci_free_consistent(adapter->pdev, pnob->mcc_q_size,
			pnob->mcc_q, pnob->mcc_q_bus);

	if (pnob->mcc_wrb_ctxt)
		free_pages((unsigned long)pnob->mcc_wrb_ctxt,
			get_order(pnob->mcc_wrb_ctxt_size));

	if (pnob->mcc_cq)
		pci_free_consistent(adapter->pdev, pnob->mcc_cq_size,
			pnob->mcc_cq, pnob->mcc_cq_bus);

	if (pnob->event_q)
		pci_free_consistent(adapter->pdev, pnob->event_q_size,
			pnob->event_q, pnob->event_q_bus);

	if (pnob->tx_cq)
		pci_free_consistent(adapter->pdev, pnob->tx_cq_size,
			pnob->tx_cq, pnob->tx_cq_bus);

	if (pnob->tx_q)
		pci_free_consistent(adapter->pdev, pnob->tx_q_size,
			pnob->tx_q, pnob->tx_q_bus);

	if (pnob->rx_q)
		pci_free_consistent(adapter->pdev, pnob->rx_q_size,
			pnob->rx_q, pnob->rx_q_bus);

	if (pnob->rx_cq)
		pci_free_consistent(adapter->pdev, pnob->rx_cq_size,
			pnob->rx_cq, pnob->rx_cq_bus);

	if (pnob->mb_ptr)
		pci_free_consistent(adapter->pdev, pnob->mb_size, pnob->mb_ptr,
			pnob->mb_bus);

	free_netdev(netdev);
}
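
/*
 * Allocate DMA-coherent memory for the mailbox and for every ring
 * (event, TX, TX compl, RX, RX compl), plus the host-side context
 * arrays that shadow the TX and RX rings.
 */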
static int be_nob_ring_alloc(struct be_adapter *adapter,
			     struct be_net_object *pnob)
{
	u32 size;

	/* Mail box rd; mailbox pointer needs to be 16 byte aligned */
	pnob->mb_size = sizeof(struct MCC_MAILBOX_AMAP) + 16;
	pnob->mb_ptr = pci_alloc_consistent(adapter->pdev, pnob->mb_size,
				&pnob->mb_bus);
	if (!pnob->mb_bus)
		return -1;
	memset(pnob->mb_ptr, 0, pnob->mb_size);
	pnob->mb_rd.va = PTR_ALIGN(pnob->mb_ptr, 16);
	pnob->mb_rd.pa = PTR_ALIGN(pnob->mb_bus, 16);
	pnob->mb_rd.length = sizeof(struct MCC_MAILBOX_AMAP);
	/*
	 * Event queue
	 */
	pnob->event_q_len = EVENT_Q_LEN;
	pnob->event_q_size = pnob->event_q_len * sizeof(struct EQ_ENTRY_AMAP);
	pnob->event_q = pci_alloc_consistent(adapter->pdev, pnob->event_q_size,
				&pnob->event_q_bus);
	if (!pnob->event_q_bus)
		return -1;
	memset(pnob->event_q, 0, pnob->event_q_size);
	/*
	 * Eth TX queue
	 */
	pnob->tx_q_len = ETH_TXQ_LEN;
	pnob->tx_q_port = 0;
	pnob->tx_q_size = pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP);
	pnob->tx_q = pci_alloc_consistent(adapter->pdev, pnob->tx_q_size,
				&pnob->tx_q_bus);
	if (!pnob->tx_q_bus)
		return -1;
	memset(pnob->tx_q, 0, pnob->tx_q_size);
	/*
	 * Eth TX Compl queue
	 */
	pnob->txcq_len = ETH_TXCQ_LEN;
	pnob->tx_cq_size = pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP);
	pnob->tx_cq = pci_alloc_consistent(adapter->pdev, pnob->tx_cq_size,
				&pnob->tx_cq_bus);
	if (!pnob->tx_cq_bus)
		return -1;
	memset(pnob->tx_cq, 0, pnob->tx_cq_size);
	/*
	 * Eth RX queue
	 */
	pnob->rx_q_len = ETH_RXQ_LEN;
	pnob->rx_q_size = pnob->rx_q_len * sizeof(struct ETH_RX_D_AMAP);
	pnob->rx_q = pci_alloc_consistent(adapter->pdev, pnob->rx_q_size,
				&pnob->rx_q_bus);
	if (!pnob->rx_q_bus)
		return -1;
	memset(pnob->rx_q, 0, pnob->rx_q_size);
	/*
	 * Eth Unicast RX Compl queue
	 */
	pnob->rx_cq_len = ETH_UC_RXCQ_LEN;
	pnob->rx_cq_size = pnob->rx_cq_len *
			sizeof(struct ETH_RX_COMPL_AMAP);
	pnob->rx_cq = pci_alloc_consistent(adapter->pdev, pnob->rx_cq_size,
				&pnob->rx_cq_bus);
	if (!pnob->rx_cq_bus)
		return -1;
	memset(pnob->rx_cq, 0, pnob->rx_cq_size);

	/* TX resources */
	size = pnob->tx_q_len * sizeof(void **);
	pnob->tx_ctxt = kzalloc(size, GFP_KERNEL);
	if (pnob->tx_ctxt == NULL)
		return -1;

	/* RX resources */
	size = pnob->rx_q_len * sizeof(void *);
	pnob->rx_ctxt = kzalloc(size, GFP_KERNEL);
	if (pnob->rx_ctxt == NULL)
		return -1;

	size = (pnob->rx_q_len * sizeof(struct be_rx_page_info));
	pnob->rx_page_info = kzalloc(size, GFP_KERNEL);
	if (pnob->rx_page_info == NULL)
		return -1;

	adapter->eth_statsp = kzalloc(sizeof(struct FWCMD_ETH_GET_STATISTICS),
				GFP_KERNEL);
	if (adapter->eth_statsp == NULL)
		return -1;
	pnob->rx_buf_size = rxbuf_size;
	return 0;
}
/*
   This function initializes the be_net_object for subsequent
   network operations.

   Before calling this function, the driver must have allocated
   space for the NetObject structure, initialized the structure,
   allocated DMAable memory for all the network queues that form
   part of the NetObject and populated the start address (virtual)
   and number of entries allocated for each queue in the NetObject
   structure.

   The driver must also have allocated memory to hold the
   mailbox structure (MCC_MAILBOX) and posted the physical address,
   virtual addresses and the size of the mailbox memory in
   NetObj.mb_rd. This structure is used by BECLIB for
   initial communication with the embedded MCC processor. BECLIB
   uses the mailbox until MCC rings are created for more efficient
   communication with the MCC processor.

   If the driver wants to create multiple network interfaces for more
   than one protection domain, it can call be_create_netobj()
   multiple times, once for each protection domain. A maximum of
   32 protection domains is supported.
*/
static int
be_create_netobj(struct be_net_object *pnob, u8 __iomem *csr_va,
	u8 __iomem *db_va, u8 __iomem *pci_va)
{
	int status = 0;
	bool eventable = false, tx_no_delay = false, rx_no_delay = false;
	struct be_eq_object *eq_objectp = NULL;
	struct be_function_object *pfob = &pnob->fn_obj;
	struct ring_desc rd;
	u32 set_rxbuf_size;
	u32 tx_cmpl_wm = CEV_WMARK_96;	/* 0xffffffff to disable */
	u32 rx_cmpl_wm = CEV_WMARK_160;	/* 0xffffffff to disable */
	u32 eq_delay = 0;	/* delay in 8usec units. 0xffffffff to disable */

	memset(&rd, 0, sizeof(struct ring_desc));

	status = be_function_object_create(csr_va, db_va, pci_va,
			BE_FUNCTION_TYPE_NETWORK, &pnob->mb_rd, pfob);
	if (status != BE_SUCCESS)
		return status;
	pnob->fn_obj_created = true;

	if (tx_cmpl_wm == 0xffffffff)
		tx_no_delay = true;
	if (rx_cmpl_wm == 0xffffffff)
		rx_no_delay = true;
	/*
	 * now create the necessary rings
	 * Event Queue first.
	 */
	if (pnob->event_q_len) {
		rd.va = pnob->event_q;
		rd.pa = pnob->event_q_bus;
		rd.length = pnob->event_q_size;

		status = be_eq_create(pfob, &rd, 4, pnob->event_q_len,
				(u32) -1,	/* CEV_WMARK_* or -1 */
				eq_delay,	/* in 8us units, or -1 */
				&pnob->event_q_obj);
		if (status != BE_SUCCESS)
			goto error_ret;
		pnob->event_q_id = pnob->event_q_obj.eq_id;
		pnob->event_q_created = 1;
		eventable = true;
		eq_objectp = &pnob->event_q_obj;
	}
	/*
	 * Now Eth Tx Compl. queue.
	 */
	if (pnob->txcq_len) {
		rd.va = pnob->tx_cq;
		rd.pa = pnob->tx_cq_bus;
		rd.length = pnob->tx_cq_size;

		status = be_cq_create(pfob, &rd,
			pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP),
			false,		/* solicited events */
			tx_no_delay,	/* nodelay */
			tx_cmpl_wm,	/* Watermark encodings */
			eq_objectp, &pnob->tx_cq_obj);
		if (status != BE_SUCCESS)
			goto error_ret;

		pnob->tx_cq_id = pnob->tx_cq_obj.cq_id;
		pnob->tx_cq_created = 1;
	}
	/*
	 * Eth Tx queue
	 */
	if (pnob->tx_q_len) {
		struct be_eth_sq_parameters ex_params = { 0 };
		u32 type;

		if (pnob->tx_q_port) {
			/* TXQ to be bound to a specific port */
			type = BE_ETH_TX_RING_TYPE_BOUND;
			ex_params.port = pnob->tx_q_port - 1;
		} else
			type = BE_ETH_TX_RING_TYPE_STANDARD;

		rd.va = pnob->tx_q;
		rd.pa = pnob->tx_q_bus;
		rd.length = pnob->tx_q_size;

		status = be_eth_sq_create_ex(pfob, &rd,
				pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP),
				type, 2, &pnob->tx_cq_obj,
				&ex_params, &pnob->tx_q_obj);

		if (status != BE_SUCCESS)
			goto error_ret;

		pnob->tx_q_id = pnob->tx_q_obj.bid;
		pnob->tx_q_created = 1;
	}
	/*
	 * Now Eth Rx compl. queue. Always needed.
	 */
	rd.va = pnob->rx_cq;
	rd.pa = pnob->rx_cq_bus;
	rd.length = pnob->rx_cq_size;

	status = be_cq_create(pfob, &rd,
		pnob->rx_cq_len * sizeof(struct ETH_RX_COMPL_AMAP),
		false,		/* solicited events */
		rx_no_delay,	/* nodelay */
		rx_cmpl_wm,	/* Watermark encodings */
		eq_objectp, &pnob->rx_cq_obj);
	if (status != BE_SUCCESS)
		goto error_ret;

	pnob->rx_cq_id = pnob->rx_cq_obj.cq_id;
	pnob->rx_cq_created = 1;

	status = be_eth_rq_set_frag_size(pfob, pnob->rx_buf_size,
			(u32 *) &set_rxbuf_size);
	if (status != BE_SUCCESS) {
		be_eth_rq_get_frag_size(pfob, (u32 *) &pnob->rx_buf_size);
		if ((pnob->rx_buf_size != 2048) && (pnob->rx_buf_size != 4096)
		    && (pnob->rx_buf_size != 8192))
			goto error_ret;
	} else {
		if (pnob->rx_buf_size != set_rxbuf_size)
			pnob->rx_buf_size = set_rxbuf_size;
	}
	/*
	 * Eth RX queue. be_eth_rq_create() always assumes 2 pages size
	 */
	rd.va = pnob->rx_q;
	rd.pa = pnob->rx_q_bus;
	rd.length = pnob->rx_q_size;

	status = be_eth_rq_create(pfob, &rd, &pnob->rx_cq_obj,
			     &pnob->rx_cq_obj, &pnob->rx_q_obj);

	if (status != BE_SUCCESS)
		goto error_ret;

	pnob->rx_q_id = pnob->rx_q_obj.rid;
	pnob->rx_q_created = 1;

	return BE_SUCCESS;	/* All required queues created. */

error_ret:
	be_destroy_netobj(pnob);
	return status;
}
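
/*
 * Reset all ring head/tail indices and ring memory, create the
 * NetObject rings in hardware, and post the initial RX buffers.
 */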
static int be_nob_ring_init(struct be_adapter *adapter,
			    struct be_net_object *pnob)
{
	int status;

	pnob->event_q_tl = 0;

	pnob->tx_q_hd = 0;
	pnob->tx_q_tl = 0;

	pnob->tx_cq_tl = 0;

	pnob->rx_cq_tl = 0;

	memset(pnob->event_q, 0, pnob->event_q_size);
	memset(pnob->tx_cq, 0, pnob->tx_cq_size);
	memset(pnob->tx_ctxt, 0, pnob->tx_q_len * sizeof(void **));
	memset(pnob->rx_ctxt, 0, pnob->rx_q_len * sizeof(void *));
	pnob->rx_pg_info_hd = 0;
	pnob->rx_q_hd = 0;
	atomic_set(&pnob->rx_q_posted, 0);

	status = be_create_netobj(pnob, adapter->csr_va, adapter->db_va,
				adapter->pci_va);
	if (status != BE_SUCCESS)
		return -1;

	be_post_eth_rx_buffs(pnob);
	return 0;
}
/* This function handles async callback for link status */
static void
be_link_status_async_callback(void *context, u32 event_code, void *event)
{
	struct ASYNC_EVENT_LINK_STATE_AMAP *link_status = event;
	struct be_adapter *adapter = context;
	bool link_enable = false;
	struct be_net_object *pnob;
	struct ASYNC_EVENT_TRAILER_AMAP *async_trailer;
	struct net_device *netdev;
	u32 async_event_code, async_event_type, active_port;
	u32 port0_link_status, port1_link_status, port0_duplex, port1_duplex;
	u32 port0_speed, port1_speed;

	if (event_code != ASYNC_EVENT_CODE_LINK_STATE) {
		/* Not our event to handle */
		return;
	}
	async_trailer = (struct ASYNC_EVENT_TRAILER_AMAP *)
	    ((u8 *) event + sizeof(struct MCC_CQ_ENTRY_AMAP) -
	     sizeof(struct ASYNC_EVENT_TRAILER_AMAP));

	async_event_code = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_code,
					     async_trailer);
	BUG_ON(async_event_code != ASYNC_EVENT_CODE_LINK_STATE);

	pnob = adapter->net_obj;
	netdev = pnob->netdev;

	/* Determine if this event is a switch VLD or a physical link event */
	async_event_type = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_type,
					     async_trailer);
	active_port = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
					active_port, link_status);
	port0_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
					      port0_link_status, link_status);
	port1_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
					      port1_link_status, link_status);
	port0_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
					 port0_duplex, link_status);
	port1_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
					 port1_duplex, link_status);
	port0_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
					port0_speed, link_status);
	port1_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
					port1_speed, link_status);
	if (async_event_type == NTWK_LINK_TYPE_VIRTUAL) {
		adapter->be_stat.bes_link_change_virtual++;
		if (adapter->be_link_sts->active_port != active_port) {
			dev_notice(&netdev->dev,
				"Active port changed due to VLD on switch\n");
		} else {
			dev_notice(&netdev->dev, "Link status update\n");
		}
	} else {
		adapter->be_stat.bes_link_change_physical++;
		if (adapter->be_link_sts->active_port != active_port) {
			dev_notice(&netdev->dev,
				"Active port changed due to port link"
				" status change\n");
		} else {
			dev_notice(&netdev->dev, "Link status update\n");
		}
	}

	memset(adapter->be_link_sts, 0, sizeof(struct BE_LINK_STATUS));

	if ((port0_link_status == ASYNC_EVENT_LINK_UP) ||
	    (port1_link_status == ASYNC_EVENT_LINK_UP)) {
		if ((adapter->port0_link_sts == BE_PORT_LINK_DOWN) &&
		    (adapter->port1_link_sts == BE_PORT_LINK_DOWN)) {
			/* Both ports were down earlier, so the link is
			 * coming up now */
			link_enable = true;
		}

		if (port0_link_status == ASYNC_EVENT_LINK_UP) {
			adapter->port0_link_sts = BE_PORT_LINK_UP;
			adapter->be_link_sts->mac0_duplex = port0_duplex;
			adapter->be_link_sts->mac0_speed = port0_speed;
			if (active_port == NTWK_PORT_A)
				adapter->be_link_sts->active_port = 0;
		} else
			adapter->port0_link_sts = BE_PORT_LINK_DOWN;

		if (port1_link_status == ASYNC_EVENT_LINK_UP) {
			adapter->port1_link_sts = BE_PORT_LINK_UP;
			adapter->be_link_sts->mac1_duplex = port1_duplex;
			adapter->be_link_sts->mac1_speed = port1_speed;
			if (active_port == NTWK_PORT_B)
				adapter->be_link_sts->active_port = 1;
		} else
			adapter->port1_link_sts = BE_PORT_LINK_DOWN;

		dev_info(&netdev->dev, "Link Properties:\n");
		be_print_link_info(adapter->be_link_sts);

		if (!link_enable)
			return;
		/*
		 * Both ports were down previously, but at least one of
		 * them has come up. If this netdevice's carrier is not up,
		 * then indicate to the stack.
		 */
		if (!netif_carrier_ok(netdev)) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
		}
		return;
	}

	/* Now both the ports are down. Tell the stack about it */
	dev_info(&netdev->dev, "Both ports are down\n");
	adapter->port0_link_sts = BE_PORT_LINK_DOWN;
	adapter->port1_link_sts = BE_PORT_LINK_DOWN;
	if (netif_carrier_ok(netdev)) {
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}
	return;
}
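
/*
 * Allocate memory for the MCC work request ring, its per-WRB context
 * array and the MCC completion ring.
 */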
static int be_mcc_create(struct be_adapter *adapter)
{
	struct be_net_object *pnob;

	pnob = adapter->net_obj;
	/*
	 * Create the MCC ring so that all further communication with
	 * MCC can go through the ring. We do this at the end since
	 * we do not want to be dealing with interrupts until the
	 * initialization is complete.
	 */
	pnob->mcc_q_len = MCC_Q_LEN;
	pnob->mcc_q_size = pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP);
	pnob->mcc_q = pci_alloc_consistent(adapter->pdev, pnob->mcc_q_size,
				&pnob->mcc_q_bus);
	if (!pnob->mcc_q_bus)
		return -1;
	/*
	 * space for MCC WRB context
	 */
	pnob->mcc_wrb_ctxtLen = MCC_Q_LEN;
	pnob->mcc_wrb_ctxt_size = pnob->mcc_wrb_ctxtLen *
		sizeof(struct be_mcc_wrb_context);
	pnob->mcc_wrb_ctxt = (void *)__get_free_pages(GFP_KERNEL,
		get_order(pnob->mcc_wrb_ctxt_size));
	if (pnob->mcc_wrb_ctxt == NULL)
		return -1;
	/*
	 * Space for MCC compl. ring
	 */
	pnob->mcc_cq_len = MCC_CQ_LEN;
	pnob->mcc_cq_size = pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP);
	pnob->mcc_cq = pci_alloc_consistent(adapter->pdev, pnob->mcc_cq_size,
				&pnob->mcc_cq_bus);
	if (!pnob->mcc_cq_bus)
		return -1;
	return 0;
}
/*
   This function creates the MCC request and completion ring required
   for communicating with the ARM processor. The caller must have
   allocated the required amount of memory for the MCC ring and MCC
   completion ring and posted the virtual address and number of
   entries in the corresponding members (mcc_q and mcc_cq) in the
   NetObject structure.

   When this call is completed, all further communication with
   ARM will switch from mailbox to this ring.

   pnob	- Pointer to the NetObject structure. This NetObject should
	  have been created using a previous call to be_create_netobj()
*/
int be_create_mcc_rings(struct be_net_object *pnob)
{
	int status = 0;
	struct ring_desc rd;
	struct be_function_object *pfob = &pnob->fn_obj;

	memset(&rd, 0, sizeof(struct ring_desc));
	if (pnob->mcc_cq_len) {
		rd.va = pnob->mcc_cq;
		rd.pa = pnob->mcc_cq_bus;
		rd.length = pnob->mcc_cq_size;

		status = be_cq_create(pfob, &rd,
			pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP),
			false,	/* solicited events */
			true,	/* nodelay */
			0,	/* 0 Watermark since Nodelay is true */
			&pnob->event_q_obj,
			&pnob->mcc_cq_obj);

		if (status != BE_SUCCESS)
			return status;

		pnob->mcc_cq_id = pnob->mcc_cq_obj.cq_id;
		pnob->mcc_cq_created = 1;
	}
	if (pnob->mcc_q_len) {
		rd.va = pnob->mcc_q;
		rd.pa = pnob->mcc_q_bus;
		rd.length = pnob->mcc_q_size;

		status = be_mcc_ring_create(pfob, &rd,
			pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP),
			pnob->mcc_wrb_ctxt, pnob->mcc_wrb_ctxtLen,
			&pnob->mcc_cq_obj, &pnob->mcc_q_obj);

		if (status != BE_SUCCESS)
			return status;

		pnob->mcc_q_created = 1;
	}
	return BE_SUCCESS;
}
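
/* Zero the MCC rings and their indices, then create them in hardware. */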
static int be_mcc_init(struct be_adapter *adapter)
{
	u32 r;
	struct be_net_object *pnob;

	pnob = adapter->net_obj;
	memset(pnob->mcc_q, 0, pnob->mcc_q_size);
	pnob->mcc_q_hd = 0;

	memset(pnob->mcc_wrb_ctxt, 0, pnob->mcc_wrb_ctxt_size);

	memset(pnob->mcc_cq, 0, pnob->mcc_cq_size);
	pnob->mcc_cq_tl = 0;

	r = be_create_mcc_rings(adapter->net_obj);
	if (r != BE_SUCCESS)
		return -1;

	return 0;
}
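
/*
 * Called by the PCI subsystem when the device is removed (and from the
 * be_probe() error path): undo everything be_probe() set up.
 */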
static void be_remove(struct pci_dev *pdev)
{
	struct be_net_object *pnob;
	struct be_adapter *adapter;

	adapter = pci_get_drvdata(pdev);
	if (!adapter)
		return;

	pci_set_drvdata(pdev, NULL);
	pnob = (struct be_net_object *)adapter->net_obj;

	flush_scheduled_work();

	if (pnob) {
		/* Unregister async callback function for link status updates */
		if (pnob->mcc_q_created)
			be_mcc_add_async_event_callback(&pnob->mcc_q_obj,
								NULL, NULL);
		netobject_cleanup(adapter, pnob);
	}

	if (adapter->csr_va)
		iounmap(adapter->csr_va);
	if (adapter->db_va)
		iounmap(adapter->db_va);
	if (adapter->pci_va)
		iounmap(adapter->pci_va);

	pci_release_regions(adapter->pdev);
	pci_disable_device(adapter->pdev);

	kfree(adapter->be_link_sts);
	kfree(adapter->eth_statsp);

	if (adapter->timer_ctxt.get_stats_timer.function)
		del_timer_sync(&adapter->timer_ctxt.get_stats_timer);
	kfree(adapter);
}
/*
 * This function is called by the PCI sub-system when it finds a PCI
 * device with dev/vendor IDs that match with one of our devices.
 * All of the driver initialization is done in this function.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD get_fwv;
	struct be_net_object *pnob;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto error;

	status = pci_request_regions(pdev, be_driver_name);
	if (status)
		goto error_pci_req;

	pci_set_master(pdev);
	adapter = kzalloc(sizeof(struct be_adapter), GFP_KERNEL);
	if (adapter == NULL) {
		status = -ENOMEM;
		goto error_adapter;
	}
	adapter->dev_state = BE_DEV_STATE_NONE;
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	adapter->enable_aic = 1;
	adapter->max_eqd = MAX_EQD;
	adapter->min_eqd = 0;
	adapter->cur_eqd = 0;

	status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!status) {
		adapter->dma_64bit_cap = true;
	} else {
		adapter->dma_64bit_cap = false;
		status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (status != 0) {
			printk(KERN_ERR "Could not set PCI DMA Mask\n");
			goto cleanup;
		}
	}

	status = init_pci_be_function(adapter, pdev);
	if (status != 0) {
		printk(KERN_ERR "Failed to map PCI BARS\n");
		status = -ENOMEM;
		goto cleanup;
	}

	be_trace_set_level(DL_ALWAYS | DL_ERR);

	adapter->be_link_sts = kmalloc(sizeof(struct BE_LINK_STATUS),
					GFP_KERNEL);
	if (adapter->be_link_sts == NULL) {
		printk(KERN_ERR "Memory allocation for link status "
		       "buffer failed\n");
		goto cleanup;
	}
	spin_lock_init(&adapter->txq_lock);

	netdev = alloc_etherdev(sizeof(struct be_net_object));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto cleanup;
	}
	pnob = netdev_priv(netdev);
	adapter->net_obj = pnob;
	adapter->netdevp = netdev;
	pnob->adapter = adapter;
	pnob->netdev = netdev;

	status = be_nob_ring_alloc(adapter, pnob);
	if (status != 0)
		goto cleanup;

	status = be_nob_ring_init(adapter, pnob);
	if (status != 0)
		goto cleanup;

	be_rxf_mac_address_read_write(&pnob->fn_obj, false, false, false,
		false, false, netdev->dev_addr, NULL, NULL);

	netdev->init = &benet_init;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	SET_NETDEV_DEV(netdev, &(adapter->pdev->dev));

	netif_napi_add(netdev, &pnob->napi, be_poll, 64);

	/* if the rx_frag size is 2K, one page is shared as two RX frags */
	pnob->rx_pg_shared =
		(pnob->rx_buf_size <= PAGE_SIZE / 2) ? true : false;
	if (pnob->rx_buf_size != rxbuf_size) {
		printk(KERN_WARNING
			"Could not set Rx buffer size to %d. Using %d\n",
					rxbuf_size, pnob->rx_buf_size);
		rxbuf_size = pnob->rx_buf_size;
	}

	tasklet_init(&(adapter->sts_handler), be_process_intr,
			(unsigned long)adapter);
	adapter->tasklet_started = 1;
	spin_lock_init(&(adapter->int_lock));

	status = be_register_isr(adapter, pnob);
	if (status != 0)
		goto cleanup;

	adapter->rx_csum = 1;
	adapter->max_rx_coal = BE_LRO_MAX_PKTS;

	memset(&get_fwv, 0,
	       sizeof(struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD));
	printk(KERN_INFO "BladeEngine Driver version:%s. "
	       "Copyright ServerEngines, Corporation 2005 - 2008\n",
	       be_drvr_ver);
	status = be_function_get_fw_version(&pnob->fn_obj, &get_fwv, NULL,
					    NULL);
	if (status == BE_SUCCESS) {
		strncpy(be_fw_ver, get_fwv.firmware_version_string, 32);
		printk(KERN_INFO "BladeEngine Firmware Version:%s\n",
		       get_fwv.firmware_version_string);
	} else {
		printk(KERN_WARNING "Unable to get BE Firmware Version\n");
	}

	sema_init(&adapter->get_eth_stat_sem, 0);
	init_timer(&adapter->timer_ctxt.get_stats_timer);
	atomic_set(&adapter->timer_ctxt.get_stat_flag, 0);
	adapter->timer_ctxt.get_stats_timer.function =
	    &be_get_stats_timer_handler;

	status = be_mcc_create(adapter);
	if (status < 0)
		goto cleanup;
	status = be_mcc_init(adapter);
	if (status < 0)
		goto cleanup;

	status = be_mcc_add_async_event_callback(&adapter->net_obj->mcc_q_obj,
		 be_link_status_async_callback, (void *)adapter);
	if (status != BE_SUCCESS) {
		printk(KERN_WARNING "add_async_event_callback failed");
		printk(KERN_WARNING
		       "Link status changes may not be reflected\n");
	}

	status = register_netdev(netdev);
	if (status != 0)
		goto cleanup;
	be_update_link_status(adapter);
	adapter->dev_state = BE_DEV_STATE_INIT;
	return 0;

cleanup:
	be_remove(pdev);
	return status;
error_adapter:
	pci_release_regions(pdev);
error_pci_req:
	pci_disable_device(pdev);
error:
	printk(KERN_ERR "BladeEngine initialization failed\n");
	return status;
}
/*
 * Get the current link status and print the status on console
 */
void be_update_link_status(struct be_adapter *adapter)
{
	int status;
	struct be_net_object *pnob = adapter->net_obj;

	status = be_rxf_link_status(&pnob->fn_obj, adapter->be_link_sts, NULL,
			NULL, NULL);
	if (status == BE_SUCCESS) {
		if (adapter->be_link_sts->mac0_speed &&
		    adapter->be_link_sts->mac0_duplex)
			adapter->port0_link_sts = BE_PORT_LINK_UP;
		else
			adapter->port0_link_sts = BE_PORT_LINK_DOWN;

		if (adapter->be_link_sts->mac1_speed &&
		    adapter->be_link_sts->mac1_duplex)
			adapter->port1_link_sts = BE_PORT_LINK_UP;
		else
			adapter->port1_link_sts = BE_PORT_LINK_DOWN;

		dev_info(&pnob->netdev->dev, "Link Properties:\n");
		be_print_link_info(adapter->be_link_sts);
		return;
	}
	dev_info(&pnob->netdev->dev, "Could not get link status\n");
	return;
}
#ifdef CONFIG_PM
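/*
 * Quiesce the interface for suspend: stop TX, wait for in-flight
 * transmits, mask interrupts, release the IRQ and destroy the rings.
 */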
static void
be_pm_cleanup(struct be_adapter *adapter,
	      struct be_net_object *pnob, struct net_device *netdev)
{
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	be_wait_nic_tx_cmplx_cmpl(pnob);
	be_disable_eq_intr(pnob);

	if (adapter->tasklet_started) {
		tasklet_kill(&adapter->sts_handler);
		adapter->tasklet_started = 0;
	}

	be_unregister_isr(adapter);
	be_disable_intr(pnob);

	be_tx_q_clean(pnob);
	be_rx_q_clean(pnob);

	be_destroy_netobj(pnob);
}
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdevp;
	struct be_net_object *pnob = netdev_priv(netdev);

	adapter->dev_pm_state = adapter->dev_state;
	adapter->dev_state = BE_DEV_STATE_SUSPEND;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_pm_cleanup(adapter, pnob, netdev);

	pci_enable_wake(pdev, 3, 1);	/* D3 Hot = 3 */
	pci_enable_wake(pdev, 4, 1);	/* D3 Cold = 4 */
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
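
/*
 * Restore settings lost across suspend; currently only the VLAN
 * configuration needs to be replayed.
 */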
static void be_up(struct be_adapter *adapter)
{
	struct be_net_object *pnob = adapter->net_obj;

	if (pnob->num_vlans != 0)
		be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
			pnob->vlan_tag, NULL, NULL, NULL);
}
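
/*
 * Reverse of be_suspend(): re-enable the device, rebuild the rings,
 * re-register the ISR and the async link callback, and re-enable
 * interrupts before attaching the netdevice again.
 */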
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdevp;
	struct be_net_object *pnob = netdev_priv(netdev);

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, 3, 0);	/* D3 Hot = 3 */
	pci_enable_wake(pdev, 4, 0);	/* D3 Cold = 4 */

	netif_carrier_on(netdev);
	netif_start_queue(netdev);

	if (netif_running(netdev)) {
		be_rxf_mac_address_read_write(&pnob->fn_obj, false, false,
			false, true, false, netdev->dev_addr, NULL, NULL);

		status = be_nob_ring_init(adapter, pnob);
		if (status < 0)
			return status;

		tasklet_init(&(adapter->sts_handler), be_process_intr,
				(unsigned long)adapter);
		adapter->tasklet_started = 1;

		if (be_register_isr(adapter, pnob) != 0) {
			printk(KERN_ERR "be_register_isr failed\n");
			return status;
		}

		status = be_mcc_init(adapter);
		if (status < 0) {
			printk(KERN_ERR "be_mcc_init failed\n");
			return status;
		}
		be_update_link_status(adapter);
		/*
		 * Register async call back function to handle link
		 * status updates
		 */
		status = be_mcc_add_async_event_callback(
				&adapter->net_obj->mcc_q_obj,
				be_link_status_async_callback, (void *)adapter);
		if (status != BE_SUCCESS) {
			printk(KERN_WARNING "add_async_event_callback failed");
			printk(KERN_WARNING
			       "Link status changes may not be reflected\n");
		}
		be_enable_intr(pnob);
		be_enable_eq_intr(pnob);
		be_up(adapter);
	}
	netif_device_attach(netdev);
	adapter->dev_state = adapter->dev_pm_state;
	return 0;
}
#endif

/* Wait until no more pending transmits */
void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *pnob)
{
	int i;

	/* Wait for 20us * 50000 (= 1s) and no more */
	i = 0;
	while ((pnob->tx_q_tl != pnob->tx_q_hd) && (i < 50000)) {
		++i;
		udelay(20);
	}

	/* Check for no more pending transmits */
	if (i >= 50000) {
		printk(KERN_WARNING
			"Did not receive completions for all TX requests\n");
	}
}
static struct pci_driver be_driver = {
	.name = be_driver_name,
	.id_table = be_device_id_table,
	.probe = be_probe,
#ifdef CONFIG_PM
	.suspend = be_suspend,
	.resume = be_resume,
#endif
	.remove = be_remove
};
/*
 * Module init entry point. Registers our device and returns.
 * Our probe will be called if the device is found.
 */
static int __init be_init_module(void)
{
	int ret;

	if (rxbuf_size != 8192 && rxbuf_size != 4096 && rxbuf_size != 2048) {
		printk(KERN_WARNING
			"Unsupported receive buffer size (%d) requested\n",
			rxbuf_size);
		printk(KERN_WARNING
			"Must be 2048, 4096 or 8192. Defaulting to 2048\n");
		rxbuf_size = 2048;
	}

	ret = pci_register_driver(&be_driver);

	return ret;
}

module_init(be_init_module);
/*
 * be_exit_module - Driver Exit Cleanup Routine
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}

module_exit(be_exit_module);