/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"

enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
};

struct vnic_res {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned int count;
};

struct vnic_intr_coal_timer_info {
	u32 mul;
	u32 div;
	u32 max_usec;
};

struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 notify_sz;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;
	u32 proxy_index;
	u64 args[VNIC_DEVCMD_NARGS];
	struct vnic_intr_coal_timer_info intr_coal_timer_info;
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
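/*
 * Resource discovery: BAR0 begins with a vnic_resource_header (or a
 * mgmt_barmap_hdr for the management vNIC), followed by an array of
 * vnic_resource entries terminated by RES_TYPE_EOL.  Each entry names a
 * resource type (WQ, RQ, CQ, interrupt control, devcmd, ...), the BAR it
 * lives in, and its offset and count within that BAR.  The loop below
 * records the per-type location in vdev->res[] for later lookup via
 * vnic_dev_get_res().
 */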
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh  = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
		(ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
			(ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error "
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				pr_err("vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
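/*
 * Worked example of the sizing above (values are illustrative): a request
 * for 100 descriptors of 13 bytes rounds up to 128 descriptors of 16
 * bytes, giving ring->size = 2048 bytes.  size_unaligned adds the 512
 * byte base_align slack so the allocation below can always be shifted up
 * to a 512-byte-aligned base address without overrunning the buffer.
 */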
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
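/*
 * Devcmd firmware handshake: the caller stages up to VNIC_DEVCMD_NARGS
 * 64-bit arguments in vdev->args, _vnic_dev_cmd() copies them to the
 * devcmd BAR registers, writes the command opcode, then polls the status
 * register in 100 usec steps (up to 'wait' iterations) until STAT_BUSY
 * clears.  A status of all-ones means register reads are failing because
 * the PCIe device is gone; STAT_ERROR means the firmware rejected the
 * command and args[0] carries the error code.
 */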
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
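/*
 * Proxying: when vdev->proxy is PROXY_BY_BDF, devcmds are not issued
 * directly but are wrapped in CMD_PROXY_BY_BDF, with the target device's
 * bus/devfn in args[0], the real command in args[1], and the original
 * a0/a1 shifted into args[2]/args[3].  The firmware executes the command
 * on behalf of the addressed vNIC and returns the proxy status in
 * args[0] and the results in args[1]/args[2].
 */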
static int vnic_dev_cmd_proxy_by_bdf(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	u32 status;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index; /* bdf */
	vdev->args[1] = cmd;
	vdev->args[2] = *a0;
	vdev->args[3] = *a1;

	err = _vnic_dev_cmd(vdev, CMD_PROXY_BY_BDF, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	*a0 = vdev->args[1];
	*a1 = vdev->args[2];

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = _vnic_dev_cmd(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy_by_bdf(vdev, cmd, a0, a1, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}
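/*
 * Compatibility pattern used throughout the rest of this file: newer
 * devcmds are probed either through CMD_CAPABILITY or by simply issuing
 * the command and checking for ERR_ECMDUNKNOWN, in which case the caller
 * falls back to an older equivalent (e.g. CMD_ENABLE_WAIT -> CMD_ENABLE,
 * CMD_MCPU_FW_INFO -> CMD_MCPU_FW_INFO_OLD) or treats the feature as
 * unsupported.
 */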
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));

		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
		if (err == ERR_ECMDUNKNOWN) {
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
				&a0, &a1, wait);
		}
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN)
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);

	return err;
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_HANG_RESET, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN) {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;

		return vnic_dev_init(vdev, 0);
	}

	return err;
}

int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, &a0, &a1, wait);
	if (err) {
		if (err == ERR_ECMDUNKNOWN)
			return vnic_dev_soft_reset_done(vdev, done);
		return err;
	}

	*done = (a0 == 0);

	return 0;
}
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;	/* zero-initialized so no stack garbage is sent */
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;	/* a0 returns the MAC address */
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN)
		return 0;

	return err;
}
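/*
 * CMD_NOTIFY argument packing: a0 carries the DMA address of the notify
 * block (0 to unset), and a1 carries the interrupt number in bits 47:32
 * (0xffff to unregister) with the notify block size in the low bits.  On
 * success the firmware returns in a1 the notify size it will actually
 * write, which is cached in notify_sz for vnic_dev_notify_ready().
 */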
static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	vdev->notify = notify_addr;
	vdev->notify_pa = notify_pa;

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		pr_err("notify block %p still allocated\n", vdev->notify);
		return -EINVAL;
	}

	notify_addr = pci_alloc_consistent(vdev->pdev,
		sizeof(struct vnic_devcmd_notify),
		&notify_pa);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify = NULL;
	vdev->notify_pa = 0;
	vdev->notify_sz = 0;

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify) {
		pci_free_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}
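/*
 * The notify block is written asynchronously by firmware, so a plain
 * memcpy() could observe a partial update.  Word 0 of the block holds a
 * checksum of the remaining words: the loop below keeps re-copying the
 * block until the checksum of the snapshot matches, i.e. until a
 * consistent version has been captured in notify_copy.
 */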
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}

	return r;
}

int vnic_dev_deinit(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
}
void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
{
	int wait = 1000;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);

	/* Use defaults when firmware doesn't support the devcmd at all or
	 * supports it for only specific hardware
	 */
	if ((err == ERR_ECMDUNKNOWN) ||
	    (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
		pr_warning("Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(vdev);
		return 0;
	}

	vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
	vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
	vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];

	return err;
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
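/*
 * usec <-> hw-cycle conversion uses the mul/div pair fetched above.
 * With the 1.5 usec default (mul = 2, div = 3): 3 usec maps to
 * (3 * 2) / 3 = 2 hardware cycles, and 2 cycles map back to
 * (2 * 3) / 2 = 3 usec.  The integer division truncates, so small
 * values may not round-trip exactly.
 */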
u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		kfree(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}

int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
	u64 a0, a1 = len;
	int wait = 1000;
	dma_addr_t prov_pa;
	void *prov_buf;
	int ret;

	prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
	if (!prov_buf)
		return -ENOMEM;

	memcpy(prov_buf, buf, len);

	a0 = prov_pa;

	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);

	pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);

	return ret;
}

int vnic_dev_enable2(struct vnic_dev *vdev, int active)
{
	u64 a0, a1 = 0;
	int wait = 1000;

	a0 = (active ? CMD_ENABLE2_ACTIVE : 0);

	return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
}

static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int *status)
{
	u64 a0 = cmd, a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
	if (!ret)
		*status = (int)a0;

	return ret;
}

int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
}

int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}