drivers/staging/benet/be_netif.c
/*
 * Copyright (C) 2005 - 2008 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
/*
 * be_netif.c
 *
 * This file contains various entry points of drivers seen by tcp/ip stack.
 */

#include <linux/if_vlan.h>
#include <linux/in.h>
#include "benet.h"
#include <linux/ip.h>
#include <linux/inet_lro.h>

/* Strings to print Link properties */
static const char *link_speed[] = {
        "Invalid link Speed Value",
        "10 Mbps",
        "100 Mbps",
        "1 Gbps",
        "10 Gbps"
};

static const char *link_duplex[] = {
        "Invalid Duplex Value",
        "Half Duplex",
        "Full Duplex"
};

static const char *link_state[] = {
        "",
        "(active)"
};

void be_print_link_info(struct BE_LINK_STATUS *lnk_status)
{
        u16 si, di, ai;

        /* Port 0 */
        if (lnk_status->mac0_speed && lnk_status->mac0_duplex) {
                /* Port is up and running */
                si = (lnk_status->mac0_speed < 5) ? lnk_status->mac0_speed : 0;
                di = (lnk_status->mac0_duplex < 3) ?
                    lnk_status->mac0_duplex : 0;
                ai = (lnk_status->active_port == 0) ? 1 : 0;
                printk(KERN_INFO "PortNo. 0: Speed - %s %s %s\n",
                       link_speed[si], link_duplex[di], link_state[ai]);
        } else
                printk(KERN_INFO "PortNo. 0: Down\n");

        /* Port 1 */
        if (lnk_status->mac1_speed && lnk_status->mac1_duplex) {
                /* Port is up and running */
                si = (lnk_status->mac1_speed < 5) ? lnk_status->mac1_speed : 0;
                di = (lnk_status->mac1_duplex < 3) ?
                    lnk_status->mac1_duplex : 0;
                /* port 1 is "(active)" only when it is the active port */
                ai = (lnk_status->active_port == 1) ? 1 : 0;
                printk(KERN_INFO "PortNo. 1: Speed - %s %s %s\n",
                       link_speed[si], link_duplex[di], link_state[ai]);
        } else
                printk(KERN_INFO "PortNo. 1: Down\n");

        return;
}

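/*
 * inet_lro frag-header callback: locate the MAC, IP and TCP headers in a
 * receive fragment.  Frames that are not TCP over IPv4 (optionally VLAN
 * tagged) are rejected with -1 so that LRO does not aggregate them.
 */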
static int
be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
                   void **ip_hdr, void **tcpudp_hdr,
                   u64 *hdr_flags, void *priv)
{
        struct ethhdr *eh;
        struct vlan_ethhdr *veh;
        struct iphdr *iph;
        u8 *va = page_address(frag->page) + frag->page_offset;
        unsigned long ll_hlen;

        /* find the mac header, abort if not IPv4 */
        prefetch(va);
        eh = (struct ethhdr *)va;
        *mac_hdr = eh;
        ll_hlen = ETH_HLEN;
        if (eh->h_proto != htons(ETH_P_IP)) {
                if (eh->h_proto == htons(ETH_P_8021Q)) {
                        veh = (struct vlan_ethhdr *)va;
                        if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
                                return -1;

                        ll_hlen += VLAN_HLEN;
                } else {
                        return -1;
                }
        }
        *hdr_flags = LRO_IPV4;

        iph = (struct iphdr *)(va + ll_hlen);
        *ip_hdr = iph;
        if (iph->protocol != IPPROTO_TCP)
                return -1;
        *hdr_flags |= LRO_TCP;
        *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);

        return 0;
}

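/*
 * net_device open entry point: configure the LRO manager, refresh the link
 * status, enable NAPI and interrupts, and re-arm the RX completion queue.
 */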
static int benet_open(struct net_device *netdev)
{
        struct be_net_object *pnob = netdev_priv(netdev);
        struct be_adapter *adapter = pnob->adapter;
        struct net_lro_mgr *lro_mgr;

        if (adapter->dev_state < BE_DEV_STATE_INIT)
                return -EAGAIN;

        lro_mgr = &pnob->lro_mgr;
        lro_mgr->dev = netdev;

        lro_mgr->features = LRO_F_NAPI;
        lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
        lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
        lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
        lro_mgr->lro_arr = pnob->lro_desc;
        lro_mgr->get_frag_header = be_get_frag_header;
        lro_mgr->max_aggr = adapter->max_rx_coal;
        lro_mgr->frag_align_pad = 2;
        if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
                lro_mgr->max_aggr = MAX_SKB_FRAGS;

        adapter->max_rx_coal = BE_LRO_MAX_PKTS;

        be_update_link_status(adapter);

        /*
         * Set carrier on only if Physical Link up
         * Either of the port link status up signifies this
         */
        if ((adapter->port0_link_sts == BE_PORT_LINK_UP) ||
            (adapter->port1_link_sts == BE_PORT_LINK_UP)) {
                netif_start_queue(netdev);
                netif_carrier_on(netdev);
        }

        adapter->dev_state = BE_DEV_STATE_OPEN;
        napi_enable(&pnob->napi);
        be_enable_intr(pnob);
        be_enable_eq_intr(pnob);

        /*
         * RX completion queue may be in dis-armed state. Arm it.
         */
        be_notify_cmpl(pnob, 0, pnob->rx_cq_id, 1);

        return 0;
}

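/*
 * net_device stop entry point: stop the TX queue, wait for pending TX
 * completions, mark both ports down and disable NAPI and interrupts.
 */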
static int benet_close(struct net_device *netdev)
{
        struct be_net_object *pnob = netdev_priv(netdev);
        struct be_adapter *adapter = pnob->adapter;

        netif_stop_queue(netdev);
        synchronize_irq(netdev->irq);

        be_wait_nic_tx_cmplx_cmpl(pnob);
        adapter->dev_state = BE_DEV_STATE_INIT;
        netif_carrier_off(netdev);

        adapter->port0_link_sts = BE_PORT_LINK_DOWN;
        adapter->port1_link_sts = BE_PORT_LINK_DOWN;
        be_disable_intr(pnob);
        be_disable_eq_intr(pnob);
        napi_disable(&pnob->napi);

        return 0;
}

/*
 * Set a MAC address for BE.
 * Takes netdev and a void pointer as arguments.
 * The pointer holds the new address to be used.
 */
static int benet_set_mac_addr(struct net_device *netdev, void *p)
{
        struct sockaddr *addr = p;
        struct be_net_object *pnob = netdev_priv(netdev);

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        be_rxf_mac_address_read_write(&pnob->fn_obj, 0, 0, false, true, false,
                                      netdev->dev_addr, NULL, NULL);
        /*
         * Since we are doing Active-Passive failover, both
         * ports should have matching MAC addresses every time.
         */
        be_rxf_mac_address_read_write(&pnob->fn_obj, 1, 0, false, true, false,
                                      netdev->dev_addr, NULL, NULL);

        return 0;
}

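/*
 * Timer handler armed by benet_get_stats(): if the statistics completion
 * callback has not released the waiter yet, do it here so the caller is
 * never blocked indefinitely.
 */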
void be_get_stats_timer_handler(unsigned long context)
{
        struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;

        if (atomic_read(&ctxt->get_stat_flag)) {
                atomic_dec(&ctxt->get_stat_flag);
                up((void *)ctxt->get_stat_sem_addr);
        }
        del_timer(&ctxt->get_stats_timer);
        return;
}

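/* Completion callback for the ethernet statistics query issued in benet_get_stats() */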
void be_get_stat_cb(void *context, int status,
                    struct MCC_WRB_AMAP *optional_wrb)
{
        struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
        /*
         * Just up the semaphore if the get_stat_flag reads 1,
         * so that the waiter can continue.  If it is 0, it was
         * already handled by the timer handler.
         */
        del_timer(&ctxt->get_stats_timer);
        if (atomic_read(&ctxt->get_stat_flag)) {
                atomic_dec(&ctxt->get_stat_flag);
                up((void *)ctxt->get_stat_sem_addr);
        }
}

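/*
 * net_device get_stats entry point: issue a firmware statistics query,
 * wait for its completion (bounded by a two second timer) and fold the
 * per-port counters into the single struct net_device_stats reported
 * to the stack.
 */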
struct net_device_stats *benet_get_stats(struct net_device *dev)
{
        struct be_net_object *pnob = netdev_priv(dev);
        struct be_adapter *adapter = pnob->adapter;
        u64 pa;
        struct be_timer_ctxt *ctxt = &adapter->timer_ctxt;

        if (adapter->dev_state != BE_DEV_STATE_OPEN) {
                /* Return previously read stats */
                return &(adapter->benet_stats);
        }
        /* Get Physical Addr */
        pa = pci_map_single(adapter->pdev, adapter->eth_statsp,
                            sizeof(struct FWCMD_ETH_GET_STATISTICS),
                            PCI_DMA_FROMDEVICE);
        ctxt->get_stat_sem_addr = (unsigned long)&adapter->get_eth_stat_sem;
        atomic_inc(&ctxt->get_stat_flag);

        be_rxf_query_eth_statistics(&pnob->fn_obj, adapter->eth_statsp,
                                    cpu_to_le64(pa), be_get_stat_cb, ctxt,
                                    NULL);

        ctxt->get_stats_timer.data = (unsigned long)ctxt;
        mod_timer(&ctxt->get_stats_timer, (jiffies + (HZ * 2)));
        down((void *)ctxt->get_stat_sem_addr);  /* callback will unblock us */

        /* Adding port0 and port1 stats. */
        adapter->benet_stats.rx_packets =
            adapter->eth_statsp->params.response.p0recvdtotalframes +
            adapter->eth_statsp->params.response.p1recvdtotalframes;
        adapter->benet_stats.tx_packets =
            adapter->eth_statsp->params.response.p0xmitunicastframes +
            adapter->eth_statsp->params.response.p1xmitunicastframes;
        adapter->benet_stats.tx_bytes =
            adapter->eth_statsp->params.response.p0xmitbyteslsd +
            adapter->eth_statsp->params.response.p1xmitbyteslsd;
        adapter->benet_stats.rx_errors =
            adapter->eth_statsp->params.response.p0crcerrors +
            adapter->eth_statsp->params.response.p1crcerrors;
        adapter->benet_stats.rx_errors +=
            adapter->eth_statsp->params.response.p0alignmentsymerrs +
            adapter->eth_statsp->params.response.p1alignmentsymerrs;
        adapter->benet_stats.rx_errors +=
            adapter->eth_statsp->params.response.p0inrangelenerrors +
            adapter->eth_statsp->params.response.p1inrangelenerrors;
        adapter->benet_stats.rx_bytes =
            adapter->eth_statsp->params.response.p0recvdtotalbytesLSD +
            adapter->eth_statsp->params.response.p1recvdtotalbytesLSD;
        adapter->benet_stats.rx_crc_errors =
            adapter->eth_statsp->params.response.p0crcerrors +
            adapter->eth_statsp->params.response.p1crcerrors;

        adapter->benet_stats.tx_packets +=
            adapter->eth_statsp->params.response.p0xmitmulticastframes +
            adapter->eth_statsp->params.response.p1xmitmulticastframes;
        adapter->benet_stats.tx_packets +=
            adapter->eth_statsp->params.response.p0xmitbroadcastframes +
            adapter->eth_statsp->params.response.p1xmitbroadcastframes;
        adapter->benet_stats.tx_errors = 0;

        adapter->benet_stats.multicast =
            adapter->eth_statsp->params.response.p0xmitmulticastframes +
            adapter->eth_statsp->params.response.p1xmitmulticastframes;

        adapter->benet_stats.rx_fifo_errors =
            adapter->eth_statsp->params.response.p0rxfifooverflowdropped +
            adapter->eth_statsp->params.response.p1rxfifooverflowdropped;
        adapter->benet_stats.rx_frame_errors =
            adapter->eth_statsp->params.response.p0alignmentsymerrs +
            adapter->eth_statsp->params.response.p1alignmentsymerrs;
        adapter->benet_stats.rx_length_errors =
            adapter->eth_statsp->params.response.p0inrangelenerrors +
            adapter->eth_statsp->params.response.p1inrangelenerrors;
        adapter->benet_stats.rx_length_errors +=
            adapter->eth_statsp->params.response.p0outrangeerrors +
            adapter->eth_statsp->params.response.p1outrangeerrors;
        adapter->benet_stats.rx_length_errors +=
            adapter->eth_statsp->params.response.p0frametoolongerrors +
            adapter->eth_statsp->params.response.p1frametoolongerrors;

        /* unmap with the DMA address obtained from pci_map_single() above */
        pci_unmap_single(adapter->pdev, pa,
                         sizeof(struct FWCMD_ETH_GET_STATISTICS),
                         PCI_DMA_FROMDEVICE);
        return &(adapter->benet_stats);
}

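/*
 * Ring the TX send-queue doorbell for nposted WRBs, at most
 * CSR_ETH_MAX_SQPOSTS of them per doorbell write.
 */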
static void be_start_tx(struct be_net_object *pnob, u32 nposted)
{
#define CSR_ETH_MAX_SQPOSTS 255
        struct SQ_DB_AMAP sqdb;

        sqdb.dw[0] = 0;

        AMAP_SET_BITS_PTR(SQ_DB, cid, &sqdb, pnob->tx_q_id);
        while (nposted) {
                if (nposted > CSR_ETH_MAX_SQPOSTS) {
                        AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb,
                                          CSR_ETH_MAX_SQPOSTS);
                        nposted -= CSR_ETH_MAX_SQPOSTS;
                } else {
                        AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb, nposted);
                        nposted = 0;
                }
                PD_WRITE(&pnob->fn_obj, etx_sq_db, sqdb.dw[0]);
        }

        return;
}

static void update_tx_rate(struct be_adapter *adapter)
{
        /* update the rate once in two seconds */
        if ((jiffies - adapter->eth_tx_jiffies) > 2 * (HZ)) {
                u32 r;
                r = adapter->eth_tx_bytes /
                    ((jiffies - adapter->eth_tx_jiffies) / (HZ));
                r = (r / 1000000);      /* M bytes/s */
                adapter->be_stat.bes_eth_tx_rate = (r * 8);     /* M bits/s */
                adapter->eth_tx_jiffies = jiffies;
                adapter->eth_tx_bytes = 0;
        }
}

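/*
 * Number of WRBs needed to transmit skb: one for each non-empty linear
 * area plus one per page fragment, across the whole frag_list chain.
 */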
static int wrb_cnt_in_skb(struct sk_buff *skb)
{
        int cnt = 0;
        while (skb) {
                if (skb->len > skb->data_len)
                        cnt++;
                cnt += skb_shinfo(skb)->nr_frags;
                skb = skb_shinfo(skb)->frag_list;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static void wrb_fill(struct ETH_WRB_AMAP *wrb, u64 addr, int len)
{
        AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb, addr >> 32);
        AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb, addr & 0xFFFFFFFF);
        AMAP_SET_BITS_PTR(ETH_WRB, frag_len, wrb, len);
}

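/*
 * Fill the offload fields of a packet's first WRB: CRC, LSO with the
 * gso_size as MSS, TCP/UDP checksum offload and the VLAN tag if present.
 */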
static void wrb_fill_extra(struct ETH_WRB_AMAP *wrb, struct sk_buff *skb,
                           struct be_net_object *pnob)
{
        wrb->dw[2] = 0;
        wrb->dw[3] = 0;
        AMAP_SET_BITS_PTR(ETH_WRB, crc, wrb, 1);
        if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
                AMAP_SET_BITS_PTR(ETH_WRB, lso, wrb, 1);
                AMAP_SET_BITS_PTR(ETH_WRB, lso_mss, wrb,
                                  skb_shinfo(skb)->gso_size);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 proto = ((struct iphdr *)ip_hdr(skb))->protocol;
                if (proto == IPPROTO_TCP)
                        AMAP_SET_BITS_PTR(ETH_WRB, tcpcs, wrb, 1);
                else if (proto == IPPROTO_UDP)
                        AMAP_SET_BITS_PTR(ETH_WRB, udpcs, wrb, 1);
        }
        if (pnob->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS_PTR(ETH_WRB, vlan, wrb, 1);
                AMAP_SET_BITS_PTR(ETH_WRB, vlan_tag, wrb, vlan_tx_tag_get(skb));
        }
}

static inline void wrb_copy_extra(struct ETH_WRB_AMAP *to,
                                  struct ETH_WRB_AMAP *from)
{
        to->dw[2] = from->dw[2];
        to->dw[3] = from->dw[3];
}

/* Returns the actual count of wrbs used including a possible dummy */
static int copy_skb_to_txq(struct be_net_object *pnob, struct sk_buff *skb,
                           u32 wrb_cnt, u32 *copied)
{
        u64 busaddr;
        struct ETH_WRB_AMAP *wrb = NULL, *first = NULL;
        u32 i;
        bool dummy = true;
        struct pci_dev *pdev = pnob->adapter->pdev;

        if (wrb_cnt & 1)
                wrb_cnt++;
        else
                dummy = false;

        atomic_add(wrb_cnt, &pnob->tx_q_used);

        while (skb) {
                if (skb->len > skb->data_len) {
                        int len = skb->len - skb->data_len;
                        busaddr = pci_map_single(pdev, skb->data, len,
                                                 PCI_DMA_TODEVICE);
                        busaddr = cpu_to_le64(busaddr);
                        wrb = &pnob->tx_q[pnob->tx_q_hd];
                        if (first == NULL) {
                                wrb_fill_extra(wrb, skb, pnob);
                                first = wrb;
                        } else {
                                wrb_copy_extra(wrb, first);
                        }
                        wrb_fill(wrb, busaddr, len);
                        be_adv_txq_hd(pnob);
                        *copied += len;
                }

                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        struct skb_frag_struct *frag =
                            &skb_shinfo(skb)->frags[i];
                        busaddr = pci_map_page(pdev, frag->page,
                                               frag->page_offset, frag->size,
                                               PCI_DMA_TODEVICE);
                        busaddr = cpu_to_le64(busaddr);
                        wrb = &pnob->tx_q[pnob->tx_q_hd];
                        if (first == NULL) {
                                wrb_fill_extra(wrb, skb, pnob);
                                first = wrb;
                        } else {
                                wrb_copy_extra(wrb, first);
                        }
                        wrb_fill(wrb, busaddr, frag->size);
                        be_adv_txq_hd(pnob);
                        *copied += frag->size;
                }
                skb = skb_shinfo(skb)->frag_list;
        }

        if (dummy) {
                wrb = &pnob->tx_q[pnob->tx_q_hd];
                BUG_ON(first == NULL);
                wrb_copy_extra(wrb, first);
                wrb_fill(wrb, 0, 0);
                be_adv_txq_hd(pnob);
        }
        AMAP_SET_BITS_PTR(ETH_WRB, complete, wrb, 1);
        AMAP_SET_BITS_PTR(ETH_WRB, last, wrb, 1);
        return wrb_cnt;
}

/* For each skb transmitted, tx_ctxt stores the num of wrbs in the
 * start index and skb pointer in the end index
 */
static inline void be_tx_wrb_info_remember(struct be_net_object *pnob,
                                           struct sk_buff *skb, int wrb_cnt,
                                           u32 start)
{
        *(u32 *) (&pnob->tx_ctxt[start]) = wrb_cnt;
        index_adv(&start, wrb_cnt - 1, pnob->tx_q_len);
        pnob->tx_ctxt[start] = skb;
}

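/*
 * hard_start_xmit entry point: if enough WRBs are free, map the skb into
 * the TX queue, remember it for completion processing and ring the doorbell.
 */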
static int benet_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_net_object *pnob = netdev_priv(netdev);
        struct be_adapter *adapter = pnob->adapter;
        u32 wrb_cnt, copied = 0;
        u32 start = pnob->tx_q_hd;

        adapter->be_stat.bes_tx_reqs++;

        wrb_cnt = wrb_cnt_in_skb(skb);
        spin_lock_bh(&adapter->txq_lock);
        if ((pnob->tx_q_len - 2 - atomic_read(&pnob->tx_q_used)) <= wrb_cnt) {
                netif_stop_queue(pnob->netdev);
                spin_unlock_bh(&adapter->txq_lock);
                adapter->be_stat.bes_tx_fails++;
                return NETDEV_TX_BUSY;
        }
        spin_unlock_bh(&adapter->txq_lock);

        wrb_cnt = copy_skb_to_txq(pnob, skb, wrb_cnt, &copied);
        be_tx_wrb_info_remember(pnob, skb, wrb_cnt, start);

        be_start_tx(pnob, wrb_cnt);

        adapter->eth_tx_bytes += copied;
        adapter->be_stat.bes_tx_wrbs += wrb_cnt;
        update_tx_rate(adapter);
        netdev->trans_start = jiffies;

        return NETDEV_TX_OK;
}

/*
 * This is the driver entry point to change the MTU of the device.
 * Returns 0 for success and errno for failure.
 */
static int benet_change_mtu(struct net_device *netdev, int new_mtu)
{
        /*
         * BE supports jumbo frame sizes up to 9000 bytes including the link
         * layer header.  Considering the different variants of frame formats
         * possible, like VLAN and SNAP/LLC, the maximum possible value for
         * MTU is 8974 bytes.
         */
        if (new_mtu < (ETH_ZLEN + ETH_FCS_LEN) || (new_mtu > BE_MAX_MTU)) {
                dev_info(&netdev->dev, "Invalid MTU requested. "
                         "Must be between %d and %d bytes\n",
                         (ETH_ZLEN + ETH_FCS_LEN), BE_MAX_MTU);
                return -EINVAL;
        }
        dev_info(&netdev->dev, "MTU changed from %d to %d\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * This is the driver entry point to register a vlan group with the device.
 */
static void benet_vlan_register(struct net_device *netdev,
                                struct vlan_group *grp)
{
        struct be_net_object *pnob = netdev_priv(netdev);

        be_disable_eq_intr(pnob);
        pnob->vlan_grp = grp;
        pnob->num_vlans = 0;
        be_enable_eq_intr(pnob);
}

/*
 * This is the driver entry point to add the vlan tag vlan_id
 * to the device netdev.
 */
static void benet_vlan_add_vid(struct net_device *netdev, u16 vlan_id)
{
        struct be_net_object *pnob = netdev_priv(netdev);

        if (pnob->num_vlans == (BE_NUM_VLAN_SUPPORTED - 1)) {
                /* no way to return an error */
                dev_info(&netdev->dev,
                         "BladeEngine: Cannot configure more than %d Vlans\n",
                         BE_NUM_VLAN_SUPPORTED);
                return;
        }
        /* The new vlan tag will be in the slot indicated by num_vlans. */
        pnob->vlan_tag[pnob->num_vlans++] = vlan_id;
        be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
                           pnob->vlan_tag, NULL, NULL, NULL);
}

/*
 * This is the driver entry point to remove the vlan tag vlan_id
 * from the device netdev.
 */
static void benet_vlan_rem_vid(struct net_device *netdev, u16 vlan_id)
{
        struct be_net_object *pnob = netdev_priv(netdev);
        u32 i, value;

        /*
         * In Blade Engine, we support 32 vlan tag filters across both ports.
         * To program a vlan tag, the RXF_RTPR_CSR register is used.
         * Each 32-bit value of RXF_RTDR_CSR can address 2 vlan tag entries.
         * The vlan table is of depth 16, thus we support 32 tags.
         */
        value = vlan_id | VLAN_VALID_BIT;
        for (i = 0; i < BE_NUM_VLAN_SUPPORTED; i++) {
                if (pnob->vlan_tag[i] == vlan_id)
                        break;
        }

        if (i == BE_NUM_VLAN_SUPPORTED)
                return;
        /* Now compact the vlan tag array by removing the hole created. */
        while ((i + 1) < BE_NUM_VLAN_SUPPORTED) {
                pnob->vlan_tag[i] = pnob->vlan_tag[i + 1];
                i++;
        }
        if ((i + 1) == BE_NUM_VLAN_SUPPORTED)
                pnob->vlan_tag[i] = (u16) 0x0;
        pnob->num_vlans--;
        be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
                           pnob->vlan_tag, NULL, NULL, NULL);
}

/*
 * This function is called to program the multicast
 * addresses into the multicast filter of the ASIC.
 */
static void be_set_multicast_filter(struct net_device *netdev)
{
        struct be_net_object *pnob = netdev_priv(netdev);
        struct dev_mc_list *mc_ptr;
        u8 mac_addr[32][ETH_ALEN];
        int i;

        if (netdev->flags & IFF_ALLMULTI) {
                /* set BE in Multicast promiscuous */
                be_rxf_multicast_config(&pnob->fn_obj, true, 0, NULL, NULL,
                                        NULL, NULL);
                return;
        }

        /* copy at most 32 addresses, the size of the local filter table */
        for (mc_ptr = netdev->mc_list, i = 0; mc_ptr && i < 32;
             mc_ptr = mc_ptr->next, i++) {
                memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
        }

        /* reset the promiscuous mode also. */
        be_rxf_multicast_config(&pnob->fn_obj, false, i,
                                &mac_addr[0][0], NULL, NULL, NULL);
}

/*
 * This is the driver entry point to set the multicast list
 * of the device netdev.  It is used to enable promiscuous mode,
 * multicast promiscuous mode or plain multicast filtering.
 */
static void benet_set_multicast_list(struct net_device *netdev)
{
        struct be_net_object *pnob = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_rxf_promiscuous(&pnob->fn_obj, 1, 1, NULL, NULL, NULL);
        } else {
                be_rxf_promiscuous(&pnob->fn_obj, 0, 0, NULL, NULL, NULL);
                be_set_multicast_filter(netdev);
        }
}

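/*
 * One-time netdevice initialization: install the driver entry points,
 * advertise offload features and register the ethtool operations.
 */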
int benet_init(struct net_device *netdev)
{
        struct be_net_object *pnob = netdev_priv(netdev);
        struct be_adapter *adapter = pnob->adapter;

        ether_setup(netdev);

        netdev->open = &benet_open;
        netdev->stop = &benet_close;
        netdev->hard_start_xmit = &benet_xmit;

        netdev->get_stats = &benet_get_stats;

        netdev->set_multicast_list = &benet_set_multicast_list;

        netdev->change_mtu = &benet_change_mtu;
        netdev->set_mac_address = &benet_set_mac_addr;

        netdev->vlan_rx_register = benet_vlan_register;
        netdev->vlan_rx_add_vid = benet_vlan_add_vid;
        netdev->vlan_rx_kill_vid = benet_vlan_rem_vid;

        netdev->features =
            NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
            NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM;

        netdev->flags |= IFF_MULTICAST;

        /* If device is DAC Capable, set the HIGHDMA flag for netdevice. */
        if (adapter->dma_64bit_cap)
                netdev->features |= NETIF_F_HIGHDMA;

        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
        return 0;
}