/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
static enum vxge_hw_status
__vxge_hw_fifo_delete(
	struct __vxge_hw_vpath_handle *vpath_handle);

static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
			u32 size);

static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
			struct __vxge_hw_blockpool_entry *entry);

static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
					void *block_addr,
					u32 length,
					struct pci_dev *dma_h,
					struct pci_dev *acc_handle);

static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
			struct __vxge_hw_blockpool *blockpool,
			u32 pool_size,
			u32 pool_max);

static void
__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);

static void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
			u32 size,
			struct vxge_hw_mempool_dma *dma_object);

static void
__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
			void *memblock,
			u32 size,
			struct vxge_hw_mempool_dma *dma_object);

static void
__vxge_hw_channel_free(
	struct __vxge_hw_channel *channel);

static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);

static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);

static enum vxge_hw_status
__vxge_hw_device_register_poll(
	void __iomem *reg,
	u64 mask, u32 max_millis);

static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
			  u64 mask, u32 max_millis)
{
	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
	wmb();

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
	wmb();

	return __vxge_hw_device_register_poll(addr, mask, max_millis);
}
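/*
 * Note: the helper above splits a 64-bit PIO write into two 32-bit halves
 * (vxge_bVALn(val64, 32, 32) selects the upper half, vxge_bVALn(val64, 0, 32)
 * the lower) and then polls the same register until the strobe bit given in
 * @mask clears, so callers get write-and-wait semantics from one call.
 * A usage sketch, mirroring the call in vxge_hw_vpath_fw_api() below:
 *
 *	status = __vxge_hw_pio_mem_write64(val64,
 *			&vp_reg->rts_access_steer_ctrl,
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
 *			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 */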
static struct vxge_hw_mempool *
__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
			u32 item_size, u32 private_size, u32 items_initial,
			u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
			void *userdata);

static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);

static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_vpath_stats_hw_info *hw_stats);

static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);

static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);

static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);

static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);

static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
static void
vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
{
	u64 val64;

	val64 = readq(&vp_reg->rxmac_vcfg0);
	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	writeq(val64, &vp_reg->rxmac_vcfg0);
	val64 = readq(&vp_reg->rxmac_vcfg0);
}
/*
 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
 */
int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct __vxge_hw_virtualpath *vpath;
	u64 val64, rxd_count, rxd_spat;
	int count = 0, total_count = 0;

	vpath = &hldev->virtual_paths[vp_id];
	vp_reg = vpath->vp_reg;

	vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);

	/* Check that the ring controller for this vpath has enough free RxDs
	 * to send frames to the host.  This is done by reading the
	 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
	 * RXD_SPAT value for the vpath.
	 */
	val64 = readq(&vp_reg->prc_cfg6);
	rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
	/* Use a factor of 2 when comparing rxd_count against rxd_spat for some
	 * leg room.
	 */
	rxd_spat *= 2;

	do {
		mdelay(1);

		rxd_count = readq(&vp_reg->prc_rxd_doorbell);

		/* Check that the ring controller for this vpath does
		 * not have any frame in its pipeline.
		 */
		val64 = readq(&vp_reg->frm_in_progress_cnt);
		if ((rxd_count <= rxd_spat) || (val64 > 0))
			count = 0;
		else
			count++;
		total_count++;
	} while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
			(total_count < VXGE_HW_MAX_POLLING_COUNT));

	if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
		printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
			__func__);

	return total_count;
}
/* vxge_hw_device_wait_receive_idle - This function waits until all frames
 * stored in the frame buffer for each vpath assigned to the given
 * function (hldev) have been sent to the host.
 */
void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
{
	int i, total_count = 0;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
		if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
			break;
	}
}
static enum vxge_hw_status
vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
		     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
		     u64 *steer_ctrl)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status;
	u64 val64;
	u32 retry = 0, max_retry = 100;

	vp_reg = vpath->vp_reg;

	if (vpath->vp_open) {
		max_retry = 3;
		spin_lock(&vpath->lock);
	}

	writeq(*data0, &vp_reg->rts_access_steer_data0);
	writeq(*data1, &vp_reg->rts_access_steer_data1);
	wmb();

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		*steer_ctrl;

	status = __vxge_hw_pio_mem_write64(val64,
					   &vp_reg->rts_access_steer_ctrl,
					   VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
					   VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	/* The __vxge_hw_device_register_poll can udelay for a significant
	 * amount of time, blocking other processes from the CPU.  If it delays
	 * for ~5secs, a NMI error can occur.  A way around this is to give up
	 * the processor via msleep, but this is not allowed if under lock.
	 * So, only allow it to sleep for ~4secs if open.  Otherwise, delay for
	 * 1sec and sleep for 10ms until the firmware operation has completed
	 * or timed-out.
	 */
	while ((status != VXGE_HW_OK) && retry++ < max_retry) {
		if (!vpath->vp_open)
			msleep(20);
		status = __vxge_hw_device_register_poll(
					&vp_reg->rts_access_steer_ctrl,
					VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
					VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	}

	if (status != VXGE_HW_OK)
		goto out;

	val64 = readq(&vp_reg->rts_access_steer_ctrl);
	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
		*data0 = readq(&vp_reg->rts_access_steer_data0);
		*data1 = readq(&vp_reg->rts_access_steer_data1);
		*steer_ctrl = val64;
	} else
		status = VXGE_HW_FAIL;

out:
	if (vpath->vp_open)
		spin_unlock(&vpath->lock);
	return status;
}
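/*
 * Note: all firmware interaction in this file funnels through
 * vxge_hw_vpath_fw_api() -- callers preload data0/data1, pick an
 * action/memo/offset triple, and read the results back from the same two
 * data registers.  A sketch, mirroring vxge_hw_upgrade_read_version() below:
 *
 *	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
 *
 *	status = vxge_hw_vpath_fw_api(vpath, VXGE_HW_FW_UPGRADE_ACTION,
 *			VXGE_HW_FW_UPGRADE_MEMO, VXGE_HW_FW_UPGRADE_OFFSET_READ,
 *			&data0, &data1, &steer_ctrl);
 */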
enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
			     u32 *minor, u32 *build)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_READ,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	*major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
	*minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
	*build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

	return status;
}
enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	u32 ret;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
		goto exit;
	}

	ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
	if (ret != 1) {
		vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
				__func__, ret);
		status = VXGE_HW_FAIL;
	}

exit:
	return status;
}
enum vxge_hw_status
vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	int ret_code, sec_code;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	/* send upgrade start command */
	status = vxge_hw_vpath_fw_api(vpath,
				      VXGE_HW_FW_UPGRADE_ACTION,
				      VXGE_HW_FW_UPGRADE_MEMO,
				      VXGE_HW_FW_UPGRADE_OFFSET_START,
				      &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
				__func__);
		return status;
	}

	/* Transfer fw image to adapter 16 bytes at a time */
	for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
		steer_ctrl = 0;

		/* The next 128bits of fwdata to be loaded onto the adapter */
		data0 = *((u64 *)fwdata);
		data1 = *((u64 *)fwdata + 1);

		status = vxge_hw_vpath_fw_api(vpath,
					      VXGE_HW_FW_UPGRADE_ACTION,
					      VXGE_HW_FW_UPGRADE_MEMO,
					      VXGE_HW_FW_UPGRADE_OFFSET_SEND,
					      &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
					__func__);
			goto out;
		}

		ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
		switch (ret_code) {
		case VXGE_HW_FW_UPGRADE_OK:
			/* All OK, send next 16 bytes. */
			break;
		case VXGE_FW_UPGRADE_BYTES2SKIP:
			/* skip bytes in the stream */
			fwdata += (data0 >> 8) & 0xFFFFFFFF;
			break;
		case VXGE_HW_FW_UPGRADE_DONE:
			goto out;
		case VXGE_HW_FW_UPGRADE_ERR:
			sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
			switch (sec_code) {
			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
				printk(KERN_ERR
				       "corrupted data from .ncf file\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
				printk(KERN_ERR "invalid .ncf file\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
				printk(KERN_ERR "buffer overflow\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
				printk(KERN_ERR "failed to flash the image\n");
				break;
			case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
				printk(KERN_ERR
				       "generic error. Unknown error type\n");
				break;
			default:
				printk(KERN_ERR "Unknown error of type %d\n",
				       sec_code);
				break;
			}
			status = VXGE_HW_FAIL;
			goto out;
		default:
			printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
			status = VXGE_HW_FAIL;
			goto out;
		}
		/* point to next 16 bytes */
		fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
	}
out:
	return status;
}
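/*
 * Note: the upload loop above moves VXGE_HW_FW_UPGRADE_BLK_SIZE (16) bytes
 * per iteration -- one 128-bit chunk split across data0/data1 -- and the
 * adapter steers the stream through the returned ret_code: OK advances one
 * block, BYTES2SKIP additionally skips the byte count encoded in
 * (data0 >> 8), and DONE/ERR terminate the transfer.
 */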
enum vxge_hw_status
vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
				struct eprom_image *img)
{
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status;
	int i;

	vpath = &hldev->virtual_paths[hldev->first_vp_id];

	for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
		data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
		data1 = steer_ctrl = 0;

		status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			VXGE_HW_FW_API_GET_EPROM_REV,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			break;

		img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
		img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
		img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
		img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
	}

	return status;
}
/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
static struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
			   enum __vxge_hw_channel_type type,
			   u32 length, u32 per_dtr_space, void *userdata)
{
	struct __vxge_hw_channel *channel;
	struct __vxge_hw_device *hldev;
	int size = 0;
	u32 vp_id;

	hldev = vph->vpath->hldev;
	vp_id = vph->vpath->vp_id;

	switch (type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		size = sizeof(struct __vxge_hw_fifo);
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		size = sizeof(struct __vxge_hw_ring);
		break;
	default:
		break;
	}

	channel = kzalloc(size, GFP_KERNEL);
	if (channel == NULL)
		goto exit0;
	INIT_LIST_HEAD(&channel->item);

	channel->common_reg = hldev->common_reg;
	channel->first_vp_id = hldev->first_vp_id;
	channel->type = type;
	channel->devh = hldev;
	channel->vph = vph;
	channel->userdata = userdata;
	channel->per_dtr_space = per_dtr_space;
	channel->length = length;
	channel->vp_id = vp_id;

	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->work_arr == NULL)
		goto exit1;

	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->free_arr == NULL)
		goto exit1;
	channel->free_ptr = length;

	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->reserve_arr == NULL)
		goto exit1;
	channel->reserve_ptr = length;
	channel->reserve_top = 0;

	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
	if (channel->orig_arr == NULL)
		goto exit1;

	return channel;
exit1:
	__vxge_hw_channel_free(channel);

exit0:
	return NULL;
}
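/*
 * Note: a channel keeps four parallel descriptor arrays, each of @length
 * entries -- reserve_arr (descriptors available for the driver to reserve),
 * work_arr (descriptors currently posted to the hardware), free_arr
 * (completed descriptors awaiting recycling) and orig_arr (a pristine copy
 * of reserve_arr taken at init time so a channel reset can restore the
 * original descriptor order; see __vxge_hw_channel_reset() below).
 */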
/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
	kfree(channel->work_arr);
	kfree(channel->free_arr);
	kfree(channel->reserve_arr);
	kfree(channel->orig_arr);
	kfree(channel);
}
/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
static enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
	u32 i;
	struct __vxge_hw_virtualpath *vpath;

	vpath = channel->vph->vpath;

	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
		for (i = 0; i < channel->length; i++)
			channel->orig_arr[i] = channel->reserve_arr[i];
	}

	switch (channel->type) {
	case VXGE_HW_CHANNEL_TYPE_FIFO:
		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
		channel->stats = &((struct __vxge_hw_fifo *)
				channel)->stats->common_stats;
		break;
	case VXGE_HW_CHANNEL_TYPE_RING:
		vpath->ringh = (struct __vxge_hw_ring *)channel;
		channel->stats = &((struct __vxge_hw_ring *)
				channel)->stats->common_stats;
		break;
	default:
		break;
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
static enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
	u32 i;

	for (i = 0; i < channel->length; i++) {
		if (channel->reserve_arr != NULL)
			channel->reserve_arr[i] = channel->orig_arr[i];
		if (channel->free_arr != NULL)
			channel->free_arr[i] = NULL;
		if (channel->work_arr != NULL)
			channel->work_arr[i] = NULL;
	}
	channel->free_ptr = channel->length;
	channel->reserve_ptr = channel->length;
	channel->reserve_top = 0;
	channel->post_index = 0;
	channel->compl_index = 0;

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
	u16 cmd = 0;

	/* Set the PErr Response bit and SERR in PCI command register. */
	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
	cmd |= 0x140;
	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

	pci_save_state(hldev->pdev);
}
/*
 * __vxge_hw_device_register_poll
 * Will poll certain register for specified amount of time.
 * Will poll until masked bit is not cleared.
 */
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
	u64 val64;
	u32 i = 0;
	enum vxge_hw_status ret = VXGE_HW_FAIL;

	udelay(10);

	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		udelay(100);
	} while (++i <= 9);

	i = 0;
	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		mdelay(1);
	} while (++i <= max_millis);

	return ret;
}
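/*
 * Note: the poll above is two-phase -- roughly 1ms of fine-grained 100us
 * busy-waits to catch the common fast case, then coarse 1ms delays up to
 * @max_millis -- so short operations return quickly while slow ones still
 * get a bounded wait.
 */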
/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * in progress
 * This routine checks the vpath reset in progress register is turned zero
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
	enum vxge_hw_status status;
	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	return status;
}
/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc = NULL;
	enum vxge_hw_status status;

	struct vxge_hw_legacy_reg __iomem *legacy_reg =
		(struct vxge_hw_legacy_reg __iomem *)bar0;

	status = __vxge_hw_legacy_swapper_set(legacy_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&legacy_reg->toc_first_pointer);
	toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
exit:
	return toc;
}
/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the ric is
 * completed initializing registers.
 */
static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
	if (hldev->toc_reg == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
		(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
		(struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
				(hldev->bar0 + val64);
	}

	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	status = __vxge_hw_device_vpath_reset_in_prog_check(
			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}
/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

	switch (host_type) {
	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
		if (func_id == 0) {
			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		}
		break;
	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
		break;
	case VXGE_HW_SR_VH_FUNCTION0:
	case VXGE_HW_VH_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	}

	return access_rights;
}
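/*
 * Note: in short -- function 0 of a normal host and the VH0 base/privileged
 * host types gain MRPCIM plus SRPCIM rights on top of the VPATH right every
 * function starts with; SR-VH function 0 and VH normal functions gain only
 * SRPCIM; virtual functions keep bare VPATH access.
 */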
/*
 * __vxge_hw_device_is_privilaged
 * This routine checks if the device function is privileged or not
 */
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
	if (__vxge_hw_device_access_rights_get(host_type,
			func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
		return VXGE_HW_OK;
	else
		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}
/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
 * Returns the function number of the vpath.
 */
static u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
	u64 val64;

	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

	return
	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}
/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;

	val64 = readq(&hldev->common_reg->host_type_assignments);

	hldev->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		hldev->func_id =
			__vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);

		hldev->access_rights = __vxge_hw_device_access_rights_get(
			hldev->host_type, hldev->func_id);

		hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
		hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];

		hldev->first_vp_id = i;
		break;
	}
}
/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
	int exp_cap;
	u16 lnk;

	/* Get the negotiated link width and speed from PCI config space */
	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

	if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
		return VXGE_HW_ERR_INVALID_PCI_INFO;

	switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
	case PCIE_LNK_WIDTH_RESRV:
	case PCIE_LNK_X1:
	case PCIE_LNK_X2:
	case PCIE_LNK_X4:
	case PCIE_LNK_X8:
		break;
	default:
		return VXGE_HW_ERR_INVALID_PCI_INFO;
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
static enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
				hldev->func_id)) {
		/* Validate the pci-e link width and speed */
		status = __vxge_hw_verify_pci_e_info(hldev);
		if (status != VXGE_HW_OK)
			goto exit;
	}

exit:
	return status;
}
/*
 * __vxge_hw_vpath_fw_ver_get - Get the fw version
 * Returns FW Version
 */
static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
			   struct vxge_hw_device_hw_info *hw_info)
{
	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
	u64 data0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		goto exit;

	fw_date->day =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
	fw_date->month =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
	fw_date->year =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);

	snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
		 fw_date->month, fw_date->day, fw_date->year);

	fw_version->major =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
	fw_version->minor =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
	fw_version->build =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);

	snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
		 fw_version->major, fw_version->minor, fw_version->build);

	flash_date->day =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
	flash_date->month =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
	flash_date->year =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);

	snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
		 flash_date->month, flash_date->day, flash_date->year);

	flash_version->major =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
	flash_version->minor =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
	flash_version->build =
	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);

	snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
		 flash_version->major, flash_version->minor,
		 flash_version->build);

exit:
	return status;
}
/*
 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
 * part number and product description.
 */
static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
			      struct vxge_hw_device_hw_info *hw_info)
{
	enum vxge_hw_status status;
	u64 data0, data1 = 0, steer_ctrl = 0;
	u8 *serial_number = hw_info->serial_number;
	u8 *part_number = hw_info->part_number;
	u8 *product_desc = hw_info->product_desc;
	u32 i, j = 0;

	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	((u64 *)serial_number)[0] = be64_to_cpu(data0);
	((u64 *)serial_number)[1] = be64_to_cpu(data1);

	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
	data1 = steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
	if (status != VXGE_HW_OK)
		return status;

	((u64 *)part_number)[0] = be64_to_cpu(data0);
	((u64 *)part_number)[1] = be64_to_cpu(data1);

	for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
	     i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
		data0 = i;
		data1 = steer_ctrl = 0;

		status = vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			return status;

		((u64 *)product_desc)[j++] = be64_to_cpu(data0);
		((u64 *)product_desc)[j++] = be64_to_cpu(data1);
	}

	return status;
}
1030 * Returns pci function mode
1032 static enum vxge_hw_status
1033 __vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath
*vpath
,
1034 struct vxge_hw_device_hw_info
*hw_info
)
1036 u64 data0
, data1
= 0, steer_ctrl
= 0;
1037 enum vxge_hw_status status
;
1041 status
= vxge_hw_vpath_fw_api(vpath
,
1042 VXGE_HW_FW_API_GET_FUNC_MODE
,
1043 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO
,
1044 0, &data0
, &data1
, &steer_ctrl
);
1045 if (status
!= VXGE_HW_OK
)
1048 hw_info
->function_mode
= VXGE_HW_GET_FUNC_MODE_VAL(data0
);
/*
 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
 *               from MAC address table.
 */
static enum vxge_hw_status
__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
			 u8 *macaddr, u8 *macaddr_mask)
{
	u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
	    data0 = 0, data1 = 0, steer_ctrl = 0;
	enum vxge_hw_status status;
	int i;

	do {
		status = vxge_hw_vpath_fw_api(vpath, action,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data0, &data1, &steer_ctrl);
		if (status != VXGE_HW_OK)
			goto exit;

		data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
							data1);

		for (i = ETH_ALEN; i > 0; i--) {
			macaddr[i - 1] = (u8) (data0 & 0xFF);
			data0 >>= 8;

			macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
			data1 >>= 8;
		}

		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
		data0 = 0, data1 = 0, steer_ctrl = 0;

	} while (!is_valid_ether_addr(macaddr));
exit:
	return status;
}
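/*
 * Note: the firmware returns the MAC address right-aligned in data0, so the
 * loop above peels it off a byte at a time into macaddr[5] down to
 * macaddr[0] (most significant byte first), and likewise the address mask
 * from data1.  The do/while keeps walking the DA table until a valid
 * (non-zero, unicast) address is found.
 */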
/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information, and the first mac address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
			   struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status;
	struct __vxge_hw_virtualpath vpath;

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	toc = __vxge_hw_device_toc_get(bar0);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
			hw_info->func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
			wmb();
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
			       (bar0 + val64);
		vpath.vp_open = 0;

		status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
			       (bar0 + val64);
		vpath.vp_open = 0;

		status = __vxge_hw_vpath_addr_get(&vpath,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/**
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. Note that all the arguments of this public API
 * are 'IN', including @hldev. Driver cooperates with
 * OS to find new Titan device, locate its PCI and memory spaces.
 *
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
 * to enable the latter to perform Titan hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config)
{
	u32 i;
	u32 nblocks = 0;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_config_check(device_config);
	if (status != VXGE_HW_OK)
		goto exit;

	hldev = (struct __vxge_hw_device *)
			vzalloc(sizeof(struct __vxge_hw_device));
	if (hldev == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	hldev->magic = VXGE_HW_DEVICE_MAGIC;

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

	/* apply config */
	memcpy(&hldev->config, device_config,
		sizeof(struct vxge_hw_device_config));

	hldev->bar0 = attr->bar0;
	hldev->pdev = attr->pdev;

	hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
	hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
	hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK) {
		vfree(hldev);
		goto exit;
	}

	__vxge_hw_device_host_info_get(hldev);

	/* Incrementing for stats blocks */
	nblocks++;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		if (device_config->vp_config[i].ring.enable ==
			VXGE_HW_RING_ENABLE)
			nblocks += device_config->vp_config[i].ring.ring_blocks;

		if (device_config->vp_config[i].fifo.enable ==
			VXGE_HW_FIFO_ENABLE)
			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
		nblocks++;
	}

	if (__vxge_hw_blockpool_create(hldev,
		&hldev->block_pool,
		device_config->dma_blockpool_initial + nblocks,
		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

		vxge_hw_device_terminate(hldev);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	status = __vxge_hw_device_initialize(hldev);
	if (status != VXGE_HW_OK) {
		vxge_hw_device_terminate(hldev);
		goto exit;
	}

	*devh = hldev;
exit:
	return status;
}
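/*
 * Note: a minimal bring-up sketch as seen from the LL driver, assuming
 * @attr has been filled in with bar0, pdev and the uld callbacks:
 *
 *	struct __vxge_hw_device *hldev;
 *
 *	status = vxge_hw_device_initialize(&hldev, &attr, device_config);
 *	if (status != VXGE_HW_OK)
 *		goto fail;
 *	...
 *	vxge_hw_device_terminate(hldev);
 */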
/**
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	__vxge_hw_blockpool_destroy(&hldev->block_pool);
	vfree(hldev);
}
/**
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_hw_info *hw_stats)
{
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
			(hldev->virtual_paths[i].vp_open ==
				VXGE_HW_VP_NOT_OPEN))
			continue;

		memcpy(hldev->virtual_paths[i].hw_stats_sav,
				hldev->virtual_paths[i].hw_stats,
				sizeof(struct vxge_hw_vpath_stats_hw_info));

		status = __vxge_hw_vpath_stats_get(
			&hldev->virtual_paths[i],
			hldev->virtual_paths[i].hw_stats);
	}

	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_hw_info));

	return status;
}
/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
			struct __vxge_hw_device *hldev,
			struct vxge_hw_device_stats_sw_info *sw_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
		sizeof(struct vxge_hw_device_stats_sw_info));

	return status;
}
/*
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
 *                               and offset and perform an operation
 * Get the statistics from the given location and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
			    u32 operation, u32 location, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
				hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
	else
		*stat = 0;
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *)aggr_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (104 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
				   struct vxge_hw_xmac_port_stats *port_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = 0x0;
	val64 = (u64 *) port_stats;

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
		status = vxge_hw_mrpcim_stats_access(hldev,
					VXGE_HW_STATS_OP_READ,
					VXGE_HW_STATS_LOC_AGGR,
					((offset + (608 * port)) >> 3), val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
			      struct vxge_hw_xmac_stats *xmac_stats)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 i;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					0, &xmac_stats->aggr_stats[0]);
	if (status != VXGE_HW_OK)
		goto exit;

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
					1, &xmac_stats->aggr_stats[1]);
	if (status != VXGE_HW_OK)
		goto exit;

	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		status = vxge_hw_device_xmac_port_stats_get(hldev,
					i, &xmac_stats->port_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		status = __vxge_hw_vpath_xmac_tx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_tx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_xmac_rx_stats_get(
					&hldev->virtual_paths[i],
					&xmac_stats->vpath_rx_stats[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
			      enum vxge_debug_level level, u32 mask)
{
	if (hldev == NULL)
		return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)
	hldev->debug_module_mask = mask;
	hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
	hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
	hldev->level_trace = level & VXGE_TRACE;
#endif
}
/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
	if (hldev == NULL)
		return VXGE_ERR;
	else
		return hldev->level_err;
#else
	return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
	if (hldev == NULL)
		return VXGE_TRACE;
	else
		return hldev->level_trace;
#else
	return 0;
#endif
}
/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 *tx, u32 *rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
		goto exit;
	}

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
		*tx = 1;
	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
		*rx = 1;
exit:
	return status;
}
/*
 * vxge_hw_device_setpause_data - set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
						 u32 port, u32 tx, u32 rx)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
		status = VXGE_HW_ERR_INVALID_DEVICE;
		goto exit;
	}

	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
		status = VXGE_HW_ERR_INVALID_PORT;
		goto exit;
	}

	status = __vxge_hw_device_is_privilaged(hldev->host_type,
			hldev->func_id);
	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
	if (tx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
	if (rx)
		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
	else
		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
	return status;
}
u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
	int link_width, exp_cap;
	u16 lnk;

	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
	link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
	return link_width;
}
/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}
/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 * first block
 * Returns the dma address of the first RxD block
 */
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
	struct vxge_hw_mempool_dma *dma_object;

	dma_object = ring->mempool->memblocks_dma_arr;
	vxge_assert(dma_object != NULL);

	return dma_object->addr;
}
/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
					       void *item)
{
	u32 memblock_idx;
	void *memblock;
	struct vxge_hw_mempool_dma *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = mempoolh->memblocks_arr[memblock_idx];

	/* get memblock DMA object by memblock index */
	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

	/* calculate offset in the memblock of this item */
	dma_item_offset = (u8 *)item - (u8 *)memblock;

	return memblock_dma_object->addr + dma_item_offset;
}
/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links two RxD blocks by DMA address
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
					 struct __vxge_hw_ring *ring, u32 from,
					 u32 to)
{
	u8 *to_item, *from_item;
	dma_addr_t to_dma;

	/* get "from" RxD block */
	from_item = mempoolh->items_arr[from];
	vxge_assert(from_item);

	/* get "to" RxD block */
	to_item = mempoolh->items_arr[to];
	vxge_assert(to_item);

	/* return address of the beginning of previous RxD block */
	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

	/* set next pointer for this RxD block to point on
	 * previous item's DMA start address */
	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}
/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * block callback
 * This function is a callback passed to __vxge_hw_mempool_create to create
 * memory pool for RxD blocks
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
				  u32 memblock_index,
				  struct vxge_hw_mempool_dma *dma_object,
				  u32 index, u32 is_last)
{
	u32 i;
	void *item = mempoolh->items_arr[index];
	struct __vxge_hw_ring *ring =
		(struct __vxge_hw_ring *)mempoolh->userdata;

	/* format rxds array */
	for (i = 0; i < ring->rxds_per_block; i++) {
		void *rxdblock_priv;
		void *uld_priv;
		struct vxge_hw_ring_rxd_1 *rxdp;

		u32 reserve_index = ring->channel.reserve_ptr -
				(index * ring->rxds_per_block + i + 1);
		u32 memblock_item_idx;

		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
						i * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 *       the memblock. For instance, in case of three RxD-blocks
		 *       per memblock this value can be 0, 1 or 2. */
		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
					memblock_index, item,
					&memblock_item_idx);

		rxdp = (struct vxge_hw_ring_rxd_1 *)
				ring->channel.reserve_arr[reserve_index];

		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format Host_Control */
		rxdp->host_control = (u64)(size_t)uld_priv;
	}

	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
	}

	if (index > 0) {
		/* link this RxD block with previous one */
		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
	}
}
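/*
 * Note: reserve_index above counts down from channel.reserve_ptr, so RxDs
 * are laid into reserve_arr in reverse -- the first RxD of block 0 lands at
 * the top of the array.  The blocks themselves end up chained into a ring:
 * each block's next-pointer references the following block, and the last
 * block is linked back around to block 0.
 */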
/*
 * __vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from reserve array to work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
	void *rxd;
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	while (vxge_hw_channel_dtr_count(channel) > 0) {

		status = vxge_hw_ring_rxd_reserve(ring, &rxd);

		vxge_assert(status == VXGE_HW_OK);

		if (ring->rxd_init) {
			status = ring->rxd_init(rxd, channel->userdata);
			if (status != VXGE_HW_OK) {
				vxge_hw_ring_rxd_free(ring, rxd);
				goto exit;
			}
		}

		vxge_hw_ring_rxd_post(ring, rxd);
	}
	status = VXGE_HW_OK;
exit:
	return status;
}
/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates Ring and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
		      struct vxge_hw_ring_attr *attr)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_ring *ring;
	u32 ring_length;
	struct vxge_hw_ring_config *config;
	struct __vxge_hw_device *hldev;
	u32 vp_id;
	struct vxge_hw_mempool_cbs ring_mp_callback;

	if ((vp == NULL) || (attr == NULL)) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	hldev = vp->vpath->hldev;
	vp_id = vp->vpath->vp_id;

	config = &hldev->config.vp_config[vp_id].ring;

	ring_length = config->ring_blocks *
			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
						VXGE_HW_CHANNEL_TYPE_RING,
						ring_length,
						attr->per_rxd_space,
						attr->userdata);
	if (ring == NULL) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	vp->vpath->ringh = ring;
	ring->vp_id = vp_id;
	ring->vp_reg = vp->vpath->vp_reg;
	ring->common_reg = hldev->common_reg;
	ring->stats = &vp->vpath->sw_stats->ring_stats;
	ring->config = config;
	ring->callback = attr->callback;
	ring->rxd_init = attr->rxd_init;
	ring->rxd_term = attr->rxd_term;
	ring->buffer_mode = config->buffer_mode;
	ring->rxds_limit = config->rxds_limit;

	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
	ring->rxd_priv_size =
		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
	ring->per_rxd_space = attr->per_rxd_space;

	ring->rxd_priv_size =
		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block =
		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
	ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
	ring->mempool = __vxge_hw_mempool_create(hldev,
				VXGE_HW_BLOCK_SIZE,
				VXGE_HW_BLOCK_SIZE,
				ring->rxdblock_priv_size,
				ring->config->ring_blocks,
				ring->config->ring_blocks,
				&ring_mp_callback,
				ring);
	if (ring->mempool == NULL) {
		__vxge_hw_ring_delete(vp);
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	}

	status = __vxge_hw_channel_initialize(&ring->channel);
	if (status != VXGE_HW_OK) {
		__vxge_hw_ring_delete(vp);
		goto exit;
	}

	/* Note:
	 * Specifying rxd_init callback means two things:
	 * 1) rxds need to be initialized by driver at channel-open time;
	 * 2) rxds need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring);
		if (status != VXGE_HW_OK) {
			__vxge_hw_ring_delete(vp);
			goto exit;
		}
	}

	/* initial replenish will increment the counter in its post() routine,
	 * we have to reset it */
	ring->stats->common_stats.usage_cnt = 0;
exit:
	return status;
}
/*
 * __vxge_hw_ring_abort - Returns the RxD
 * This function terminates the RxDs of ring
 */
static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	for (;;) {
		vxge_hw_channel_dtr_try_complete(channel, &rxdh);

		if (rxdh == NULL)
			break;

		vxge_hw_channel_dtr_complete(channel);

		if (ring->rxd_term)
			ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
				channel->userdata);

		vxge_hw_channel_dtr_free(channel, rxdh);
	}

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_ring_reset - Resets the ring
 * This function resets the ring during vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	__vxge_hw_ring_abort(ring);

	status = __vxge_hw_channel_reset(channel);

	if (status != VXGE_HW_OK)
		goto exit;

	if (ring->rxd_init) {
		status = vxge_hw_ring_replenish(ring);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
/*
 * __vxge_hw_ring_delete - Removes the ring
 * This function frees up the memory pool and removes the ring
 */
static enum vxge_hw_status
__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_ring *ring = vp->vpath->ringh;

	__vxge_hw_ring_abort(ring);

	if (ring->mempool)
		__vxge_hw_mempool_destroy(ring->mempool);

	vp->vpath->ringh = NULL;
	__vxge_hw_channel_free(&ring->channel);

	return VXGE_HW_OK;
}
/*
 * __vxge_hw_mempool_grow
 * Will resize mempool up to %num_allocate value.
 */
static enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
		       u32 *num_allocated)
{
	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
	u32 n_items = mempool->items_per_memblock;
	u32 start_block_idx = mempool->memblocks_allocated;
	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
	enum vxge_hw_status status = VXGE_HW_OK;

	*num_allocated = 0;

	if (end_block_idx > mempool->memblocks_max) {
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	for (i = start_block_idx; i < end_block_idx; i++) {
		u32 j;
		u32 is_last = ((end_block_idx - 1) == i);
		struct vxge_hw_mempool_dma *dma_object =
			mempool->memblocks_dma_arr + i;
		void *the_memblock;

		/* allocate memblock's private part. Each DMA memblock
		 * has a space allocated for item's private usage upon
		 * mempool's user request. Each time mempool grows, it will
		 * allocate new memblock and its private part at once.
		 * This helps to minimize memory usage a lot. */
		mempool->memblocks_priv_arr[i] =
				vzalloc(mempool->items_priv_size * n_items);
		if (mempool->memblocks_priv_arr[i] == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		/* allocate DMA-capable memblock */
		mempool->memblocks_arr[i] =
			__vxge_hw_blockpool_malloc(mempool->devh,
				mempool->memblock_size, dma_object);
		if (mempool->memblocks_arr[i] == NULL) {
			vfree(mempool->memblocks_priv_arr[i]);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		(*num_allocated)++;
		mempool->memblocks_allocated++;

		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);

		the_memblock = mempool->memblocks_arr[i];

		/* fill the items hash array */
		for (j = 0; j < n_items; j++) {
			u32 index = i * n_items + j;

			if (first_time && index >= mempool->items_initial)
				break;

			mempool->items_arr[index] =
				((char *)the_memblock + j*mempool->item_size);

			/* let caller to do more job on each item */
			if (mempool->item_func_alloc != NULL)
				mempool->item_func_alloc(mempool, i,
					dma_object, index, is_last);

			mempool->items_current = index + 1;
		}

		if (first_time && mempool->items_current ==
					mempool->items_initial)
			break;
	}
exit:
	return status;
}
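/*
 * Note: growth is all-or-nothing per memblock -- the private area and the
 * DMA-able block are allocated together, and a failure of the second frees
 * the first before bailing out.  On the very first grow the items loop also
 * stops at items_initial, so a partially filled trailing memblock is legal
 * (and is skipped again at destroy time via items_current).
 */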
2116 * vxge_hw_mempool_create
2117 * This function will create memory pool object. Pool may grow but will
2118 * never shrink. Pool consists of number of dynamically allocated blocks
2119 * with size enough to hold %items_initial number of items. Memory is
2120 * DMA-able but client must map/unmap before interoperating with the device.
static struct vxge_hw_mempool *
__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
            u32 memblock_size,
            u32 item_size,
            u32 items_priv_size,
            u32 items_initial,
            u32 items_max,
            struct vxge_hw_mempool_cbs *mp_callback,
            void *userdata)
{
    enum vxge_hw_status status = VXGE_HW_OK;
    u32 memblocks_to_allocate;
    struct vxge_hw_mempool *mempool = NULL;
    u32 allocated;

    if (memblock_size < item_size) {
        status = VXGE_HW_FAIL;
        goto exit;
    }

    mempool = vzalloc(sizeof(struct vxge_hw_mempool));
    if (mempool == NULL) {
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        goto exit;
    }

    mempool->devh            = devh;
    mempool->memblock_size   = memblock_size;
    mempool->items_max       = items_max;
    mempool->items_initial   = items_initial;
    mempool->item_size       = item_size;
    mempool->items_priv_size = items_priv_size;
    mempool->item_func_alloc = mp_callback->item_func_alloc;
    mempool->userdata        = userdata;

    mempool->memblocks_allocated = 0;

    mempool->items_per_memblock = memblock_size / item_size;

    mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
                    mempool->items_per_memblock;

    /* allocate array of memblocks */
    mempool->memblocks_arr =
        vzalloc(sizeof(void *) * mempool->memblocks_max);
    if (mempool->memblocks_arr == NULL) {
        __vxge_hw_mempool_destroy(mempool);
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        mempool = NULL;
        goto exit;
    }

    /* allocate array of private parts of items per memblocks */
    mempool->memblocks_priv_arr =
        vzalloc(sizeof(void *) * mempool->memblocks_max);
    if (mempool->memblocks_priv_arr == NULL) {
        __vxge_hw_mempool_destroy(mempool);
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        mempool = NULL;
        goto exit;
    }

    /* allocate array of memblocks DMA objects */
    mempool->memblocks_dma_arr =
        vzalloc(sizeof(struct vxge_hw_mempool_dma) *
            mempool->memblocks_max);
    if (mempool->memblocks_dma_arr == NULL) {
        __vxge_hw_mempool_destroy(mempool);
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        mempool = NULL;
        goto exit;
    }

    /* allocate hash array of items */
    mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
    if (mempool->items_arr == NULL) {
        __vxge_hw_mempool_destroy(mempool);
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        mempool = NULL;
        goto exit;
    }

    /* calculate initial number of memblocks */
    memblocks_to_allocate = (mempool->items_initial +
                mempool->items_per_memblock - 1) /
                    mempool->items_per_memblock;

    /* pre-allocate the mempool */
    status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
                    &allocated);
    if (status != VXGE_HW_OK) {
        __vxge_hw_mempool_destroy(mempool);
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        mempool = NULL;
        goto exit;
    }

exit:
    return mempool;
}
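/*
 * Illustrative sketch (not part of the original driver): how a caller
 * would typically size and create a pool with __vxge_hw_mempool_create().
 * The item counts and the my_item_init callback are hypothetical;
 * __vxge_hw_fifo_create() below does the equivalent with its TxDL
 * geometry.
 *
 *    struct vxge_hw_mempool_cbs cbs;
 *    struct vxge_hw_mempool *pool;
 *
 *    cbs.item_func_alloc = my_item_init;       (hypothetical callback)
 *    pool = __vxge_hw_mempool_create(devh,
 *            VXGE_HW_BLOCK_SIZE,               (one DMA block per memblock)
 *            item_size, priv_size,
 *            256, 256,                         (initial == max: no growth)
 *            &cbs, userdata);
 *    if (pool == NULL)
 *        return VXGE_HW_ERR_OUT_OF_MEMORY;
 */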
/*
 * __vxge_hw_mempool_destroy
 */
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
    u32 i, j;
    struct __vxge_hw_device *devh = mempool->devh;

    for (i = 0; i < mempool->memblocks_allocated; i++) {
        struct vxge_hw_mempool_dma *dma_object;

        vxge_assert(mempool->memblocks_arr[i]);
        vxge_assert(mempool->memblocks_dma_arr + i);

        dma_object = mempool->memblocks_dma_arr + i;

        for (j = 0; j < mempool->items_per_memblock; j++) {
            u32 index = i * mempool->items_per_memblock + j;

            /* to skip last partially filled (if any) memblock */
            if (index >= mempool->items_current)
                break;
        }

        vfree(mempool->memblocks_priv_arr[i]);

        __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
                mempool->memblock_size, dma_object);
    }

    vfree(mempool->items_arr);

    vfree(mempool->memblocks_dma_arr);

    vfree(mempool->memblocks_priv_arr);

    vfree(mempool->memblocks_arr);

    vfree(mempool);
}
/*
 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
 * Check the fifo configuration
 */
static enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
{
    if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
        (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
        return VXGE_HW_BADCFG_FIFO_BLOCKS;

    return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
 * Check the vpath configuration
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
    enum vxge_hw_status status;

    if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
        (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
        return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;

    status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
    if (status != VXGE_HW_OK)
        return status;

    if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
        ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
         (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
        return VXGE_HW_BADCFG_VPATH_MTU;

    if ((vp_config->rpa_strip_vlan_tag !=
        VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
        (vp_config->rpa_strip_vlan_tag !=
        VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
        (vp_config->rpa_strip_vlan_tag !=
        VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
        return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;

    return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_config_check - Check device configuration.
 * Check the device configuration
 */
static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
{
    u32 i;
    enum vxge_hw_status status;

    if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
        (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
        (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
        (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
        return VXGE_HW_BADCFG_INTR_MODE;

    if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
        (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
        return VXGE_HW_BADCFG_RTS_MAC_EN;

    for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        status = __vxge_hw_device_vpath_config_check(
                &new_config->vp_config[i]);
        if (status != VXGE_HW_OK)
            return status;
    }

    return VXGE_HW_OK;
}
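/*
 * Illustrative sketch (assumption, not in the original source): the checks
 * above are meant to run on a caller-filled config before device
 * initialization, e.g.:
 *
 *    struct vxge_hw_device_config cfg;
 *
 *    vxge_hw_device_config_default_get(&cfg);
 *    cfg.intr_mode = VXGE_HW_INTR_MODE_MSIX;
 *    if (__vxge_hw_device_config_check(&cfg) != VXGE_HW_OK)
 *        return -EINVAL;
 */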
/*
 * vxge_hw_device_config_default_get - Initialize device config with defaults.
 * Initialize Titan device config with default values.
 */
enum vxge_hw_status __devinit
vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
{
    u32 i;

    device_config->dma_blockpool_initial =
                    VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
    device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
    device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
    device_config->rth_en = VXGE_HW_RTH_DEFAULT;
    device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
    device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
    device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;

    for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        device_config->vp_config[i].vp_id = i;

        device_config->vp_config[i].min_bandwidth =
                    VXGE_HW_VPATH_BANDWIDTH_DEFAULT;

        device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
        device_config->vp_config[i].ring.ring_blocks =
                    VXGE_HW_DEF_RING_BLOCKS;
        device_config->vp_config[i].ring.buffer_mode =
                    VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
        device_config->vp_config[i].ring.scatter_mode =
                    VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
        device_config->vp_config[i].ring.rxds_limit =
                    VXGE_HW_DEF_RING_RXDS_LIMIT;

        device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
        device_config->vp_config[i].fifo.fifo_blocks =
                    VXGE_HW_MIN_FIFO_BLOCKS;
        device_config->vp_config[i].fifo.max_frags =
                    VXGE_HW_MAX_FIFO_FRAGS;
        device_config->vp_config[i].fifo.memblock_size =
                    VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
        device_config->vp_config[i].fifo.alignment_size =
                    VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
        device_config->vp_config[i].fifo.intr =
                    VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
        device_config->vp_config[i].fifo.no_snoop_bits =
                    VXGE_HW_FIFO_NO_SNOOP_DEFAULT;

        device_config->vp_config[i].tti.intr_enable =
                    VXGE_HW_TIM_INTR_DEFAULT;
        device_config->vp_config[i].tti.btimer_val =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.timer_ac_en =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.timer_ci_en =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.timer_ri_en =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.rtimer_val =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.util_sel =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.ltimer_val =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.urange_a =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.uec_a =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.urange_b =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.uec_b =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.urange_c =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.uec_c =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].tti.uec_d =
                    VXGE_HW_USE_FLASH_DEFAULT;

        device_config->vp_config[i].rti.intr_enable =
                    VXGE_HW_TIM_INTR_DEFAULT;
        device_config->vp_config[i].rti.btimer_val =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.timer_ac_en =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.timer_ci_en =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.timer_ri_en =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.rtimer_val =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.util_sel =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.ltimer_val =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.urange_a =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.uec_a =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.urange_b =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.uec_b =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.urange_c =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.uec_c =
                    VXGE_HW_USE_FLASH_DEFAULT;
        device_config->vp_config[i].rti.uec_d =
                    VXGE_HW_USE_FLASH_DEFAULT;

        device_config->vp_config[i].mtu =
                VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;

        device_config->vp_config[i].rpa_strip_vlan_tag =
            VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
    }

    return VXGE_HW_OK;
}
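/*
 * Illustrative sketch (hypothetical values): after fetching the defaults
 * above, a caller usually overrides only selected per-vpath fields and
 * leaves everything else at VXGE_HW_USE_FLASH_DEFAULT:
 *
 *    vxge_hw_device_config_default_get(device_config);
 *    device_config->vp_config[0].mtu = 1500;
 *    device_config->vp_config[0].rpa_strip_vlan_tag =
 *            VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE;
 */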
/*
 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 * Set the swapper bits appropriately for the legacy section.
 */
static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;

    val64 = readq(&legacy_reg->toc_swapper_fb);

    wmb();

    switch (val64) {
    case VXGE_HW_SWAPPER_INITIAL_VALUE:
        return status;

    case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
        writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
            &legacy_reg->pifm_rd_swap_en);
        writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
            &legacy_reg->pifm_rd_flip_en);
        writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
            &legacy_reg->pifm_wr_swap_en);
        writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
            &legacy_reg->pifm_wr_flip_en);
        break;

    case VXGE_HW_SWAPPER_BYTE_SWAPPED:
        writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
            &legacy_reg->pifm_rd_swap_en);
        writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
            &legacy_reg->pifm_wr_swap_en);
        break;

    case VXGE_HW_SWAPPER_BIT_FLIPPED:
        writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
            &legacy_reg->pifm_rd_flip_en);
        writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
            &legacy_reg->pifm_wr_flip_en);
        break;
    }

    wmb();

    val64 = readq(&legacy_reg->toc_swapper_fb);

    if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
        status = VXGE_HW_ERR_SWAPPER_CTRL;

    return status;
}
/*
 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
 * Set the swapper bits appropriately for the vpath.
 */
static enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
    u64 val64;

    val64 = readq(&vpath_reg->vpath_general_cfg1);
    wmb();
    val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
    writeq(val64, &vpath_reg->vpath_general_cfg1);
    wmb();
#endif
    return VXGE_HW_OK;
}
/*
 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
 * Set the swapper bits appropriately for the kdfc.
 */
static enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(
    struct vxge_hw_legacy_reg __iomem *legacy_reg,
    struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
    u64 val64;

    val64 = readq(&legacy_reg->pifm_wr_swap_en);

    if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
        val64 = readq(&vpath_reg->kdfcctl_cfg0);
        wmb();

        val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
            VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
            VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;

        writeq(val64, &vpath_reg->kdfcctl_cfg0);
        wmb();
    }

    return VXGE_HW_OK;
}
/*
 * vxge_hw_mgmt_reg_read - Read Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
              enum vxge_hw_mgmt_reg_type type,
              u32 index, u32 offset, u64 *value)
{
    enum vxge_hw_status status = VXGE_HW_OK;

    if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
        status = VXGE_HW_ERR_INVALID_DEVICE;
        goto exit;
    }

    switch (type) {
    case vxge_hw_mgmt_reg_type_legacy:
        if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        *value = readq((void __iomem *)hldev->legacy_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_toc:
        if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        *value = readq((void __iomem *)hldev->toc_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_common:
        if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        *value = readq((void __iomem *)hldev->common_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_mrpcim:
        if (!(hldev->access_rights &
            VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
            status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
            break;
        }
        if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_srpcim:
        if (!(hldev->access_rights &
            VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
            status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
            break;
        }
        if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
            status = VXGE_HW_ERR_INVALID_INDEX;
            break;
        }
        if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        *value = readq((void __iomem *)hldev->srpcim_reg[index] +
                offset);
        break;
    case vxge_hw_mgmt_reg_type_vpmgmt:
        if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
            (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
            status = VXGE_HW_ERR_INVALID_INDEX;
            break;
        }
        if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
                offset);
        break;
    case vxge_hw_mgmt_reg_type_vpath:
        if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
            (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
            status = VXGE_HW_ERR_INVALID_INDEX;
            break;
        }
        if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        *value = readq((void __iomem *)hldev->vpath_reg[index] +
                offset);
        break;
    default:
        status = VXGE_HW_ERR_INVALID_TYPE;
        break;
    }

exit:
    return status;
}
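/*
 * Illustrative sketch (the offset value is hypothetical): reading a
 * common-section register through the type/index/offset interface above:
 *
 *    u64 val;
 *    enum vxge_hw_status rc;
 *
 *    rc = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
 *                   0, 0x0, &val);
 *    if (rc != VXGE_HW_OK)
 *        return rc;
 */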
/*
 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
 */
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
    struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
    enum vxge_hw_status status = VXGE_HW_OK;
    int i = 0, j = 0;

    for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        if (!((vpath_mask) & vxge_mBIT(i)))
            continue;
        vpmgmt_reg = hldev->vpmgmt_reg[i];
        for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
            if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
            & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
                return VXGE_HW_FAIL;
        }
    }
    return status;
}
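/*
 * Illustrative sketch (assumption): vpath_mask carries one vxge_mBIT() per
 * vpath to be checked, so a caller that owns vpaths 0 and 1 would do:
 *
 *    u64 mask = vxge_mBIT(0) | vxge_mBIT(1);
 *
 *    if (vxge_hw_vpath_strip_fcs_check(hldev, mask) != VXGE_HW_OK)
 *        return status;   (FCS stripping is on for some port: bail out)
 */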
/*
 * vxge_hw_mgmt_reg_write - Write Titan register.
 */
enum vxge_hw_status
vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
               enum vxge_hw_mgmt_reg_type type,
               u32 index, u32 offset, u64 value)
{
    enum vxge_hw_status status = VXGE_HW_OK;

    if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
        status = VXGE_HW_ERR_INVALID_DEVICE;
        goto exit;
    }

    switch (type) {
    case vxge_hw_mgmt_reg_type_legacy:
        if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->legacy_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_toc:
        if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->toc_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_common:
        if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->common_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_mrpcim:
        if (!(hldev->access_rights &
            VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
            status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
            break;
        }
        if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
        break;
    case vxge_hw_mgmt_reg_type_srpcim:
        if (!(hldev->access_rights &
            VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
            status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
            break;
        }
        if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
            status = VXGE_HW_ERR_INVALID_INDEX;
            break;
        }
        if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
            offset);
        break;
    case vxge_hw_mgmt_reg_type_vpmgmt:
        if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
            (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
            status = VXGE_HW_ERR_INVALID_INDEX;
            break;
        }
        if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
            offset);
        break;
    case vxge_hw_mgmt_reg_type_vpath:
        if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
            (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
            status = VXGE_HW_ERR_INVALID_INDEX;
            break;
        }
        if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
            status = VXGE_HW_ERR_INVALID_OFFSET;
            break;
        }
        writeq(value, (void __iomem *)hldev->vpath_reg[index] +
            offset);
        break;
    default:
        status = VXGE_HW_ERR_INVALID_TYPE;
        break;
    }

exit:
    return status;
}
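/*
 * Illustrative sketch (hypothetical offset and bit): a read-modify-write
 * cycle built from the two management accessors above:
 *
 *    u64 val;
 *
 *    if (vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_vpath,
 *                  vp_id, 0x0, &val) == VXGE_HW_OK) {
 *        val |= 0x1ULL;
 *        vxge_hw_mgmt_reg_write(hldev, vxge_hw_mgmt_reg_type_vpath,
 *                       vp_id, 0x0, val);
 *    }
 */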
/*
 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
 * list callback
 * This function is a callback passed to __vxge_hw_mempool_create to
 * create the memory pool for TxD lists
 */
static void
__vxge_hw_fifo_mempool_item_alloc(
    struct vxge_hw_mempool *mempoolh,
    u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
    u32 index, u32 is_last)
{
    u32 memblock_item_idx;
    struct __vxge_hw_fifo_txdl_priv *txdl_priv;
    struct vxge_hw_fifo_txd *txdp =
        (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
    struct __vxge_hw_fifo *fifo =
            (struct __vxge_hw_fifo *)mempoolh->userdata;
    void *memblock = mempoolh->memblocks_arr[memblock_index];

    vxge_assert(txdp);

    txdp->host_control = (u64) (size_t)
        __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
                    &memblock_item_idx);

    txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

    vxge_assert(txdl_priv);

    fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;

    /* pre-format HW's TxDL's private */
    txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
    txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
    txdl_priv->dma_handle = dma_object->handle;
    txdl_priv->memblock   = memblock;
    txdl_priv->first_txdp = txdp;
    txdl_priv->next_txdl_priv = NULL;
    txdl_priv->alloc_frags = 0;
}
/*
 * __vxge_hw_fifo_create - Create a FIFO
 * This function creates FIFO and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
              struct vxge_hw_fifo_attr *attr)
{
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_fifo *fifo;
    struct vxge_hw_fifo_config *config;
    u32 txdl_size, txdl_per_memblock;
    struct vxge_hw_mempool_cbs fifo_mp_callback;
    struct __vxge_hw_virtualpath *vpath;

    if ((vp == NULL) || (attr == NULL)) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }
    vpath = vp->vpath;
    config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;

    txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);

    txdl_per_memblock = config->memblock_size / txdl_size;

    fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
                    VXGE_HW_CHANNEL_TYPE_FIFO,
                    config->fifo_blocks * txdl_per_memblock,
                    attr->per_txdl_space, attr->userdata);

    if (fifo == NULL) {
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        goto exit;
    }

    vpath->fifoh = fifo;
    fifo->nofl_db = vpath->nofl_db;

    fifo->vp_id = vpath->vp_id;
    fifo->vp_reg = vpath->vp_reg;
    fifo->stats = &vpath->sw_stats->fifo_stats;

    fifo->config = config;

    /* apply "interrupts per txdl" attribute */
    fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;

    if (fifo->config->intr)
        fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

    fifo->no_snoop_bits = config->no_snoop_bits;

    /*
     * FIFO memory management strategy:
     *
     * A TxDL splits into three independent parts:
     * - set of TxDs
     * - TxD HW private part
     * - driver private part
     *
     * Adaptive memory allocation is used, i.e. memory is allocated on
     * demand with a size that fits into one memory block.
     * One memory block may contain more than one TxDL.
     *
     * During "reserve" operations more memory can be allocated on
     * demand, for example due to a FIFO full condition.
     *
     * The pool of memory blocks never shrinks except in the
     * __vxge_hw_fifo_close routine, which essentially stops the
     * channel and frees the resources.
     */

    /* TxDL common private size == TxDL private + driver private */
    fifo->priv_size =
        sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
    fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
            VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;

    fifo->per_txdl_space = attr->per_txdl_space;

    /* recompute txdl size to be cacheline aligned */
    fifo->txdl_size = txdl_size;
    fifo->txdl_per_memblock = txdl_per_memblock;

    fifo->txdl_term = attr->txdl_term;
    fifo->callback = attr->callback;

    if (fifo->txdl_per_memblock == 0) {
        __vxge_hw_fifo_delete(vp);
        status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
        goto exit;
    }

    fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;

    fifo->mempool =
        __vxge_hw_mempool_create(vpath->hldev,
            fifo->config->memblock_size,
            fifo->txdl_size,
            fifo->priv_size,
            (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
            (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
            &fifo_mp_callback,
            fifo);

    if (fifo->mempool == NULL) {
        __vxge_hw_fifo_delete(vp);
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        goto exit;
    }

    status = __vxge_hw_channel_initialize(&fifo->channel);
    if (status != VXGE_HW_OK) {
        __vxge_hw_fifo_delete(vp);
        goto exit;
    }

    vxge_assert(fifo->channel.reserve_ptr);
exit:
    return status;
}
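/*
 * Sizing sketch (the numbers are only an example): with max_frags = 256
 * and a 32-byte struct vxge_hw_fifo_txd, one TxDL is 256 * 32 = 8192
 * bytes, so an 8 KB memblock holds exactly one TxDL and fifo_blocks
 * memblocks yield fifo_blocks TxDLs. A memblock_size smaller than one
 * TxDL makes txdl_per_memblock zero, which the code above rejects with
 * VXGE_HW_ERR_INVALID_BLOCK_SIZE.
 */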
/*
 * __vxge_hw_fifo_abort - Returns the TxD
 * This function terminates the TxDs of fifo
 */
static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
    void *txdlh;

    for (;;) {
        vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);

        if (txdlh == NULL)
            break;

        vxge_hw_channel_dtr_complete(&fifo->channel);

        if (fifo->txdl_term) {
            fifo->txdl_term(txdlh,
            VXGE_HW_TXDL_STATE_POSTED,
            fifo->channel.userdata);
        }

        vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
    }

    return VXGE_HW_OK;
}
/*
 * __vxge_hw_fifo_reset - Resets the fifo
 * This function resets the fifo during vpath reset operation
 */
static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
    enum vxge_hw_status status = VXGE_HW_OK;

    __vxge_hw_fifo_abort(fifo);
    status = __vxge_hw_channel_reset(&fifo->channel);

    return status;
}
/*
 * __vxge_hw_fifo_delete - Removes the FIFO
 * This function frees up the memory pool and removes the FIFO
 */
static enum vxge_hw_status
__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
{
    struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;

    __vxge_hw_fifo_abort(fifo);

    if (fifo->mempool)
        __vxge_hw_mempool_destroy(fifo->mempool);

    vp->vpath->fifoh = NULL;

    __vxge_hw_channel_free(&fifo->channel);

    return VXGE_HW_OK;
}
/*
 * __vxge_hw_vpath_pci_read - Read the content of given address
 *                            in pci config space.
 * Read from the vpath pci config space.
 */
static enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
             u32 phy_func_0, u32 offset, u32 *val)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;

    val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);

    if (phy_func_0)
        val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;

    writeq(val64, &vp_reg->pci_config_access_cfg1);
    wmb();
    writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
            &vp_reg->pci_config_access_cfg2);
    wmb();

    status = __vxge_hw_device_register_poll(
            &vp_reg->pci_config_access_cfg2,
            VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);

    if (status != VXGE_HW_OK)
        goto exit;

    val64 = readq(&vp_reg->pci_config_access_status);

    if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
        status = VXGE_HW_FAIL;
        *val = 0;
    } else
        *val = (u32)vxge_bVALn(val64, 32, 32);
exit:
    return status;
}
/*
 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
 * @hldev: HW device.
 * @on_off: TRUE if flickering to be on, FALSE to be off
 *
 * Flicker the link LED.
 */
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
{
    struct __vxge_hw_virtualpath *vpath;
    u64 data0, data1 = 0, steer_ctrl = 0;
    enum vxge_hw_status status;

    if (hldev == NULL) {
        status = VXGE_HW_ERR_INVALID_DEVICE;
        goto exit;
    }

    vpath = &hldev->virtual_paths[hldev->first_vp_id];

    data0 = on_off;
    status = vxge_hw_vpath_fw_api(vpath,
            VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
            VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
            0, &data0, &data1, &steer_ctrl);
exit:
    return status;
}
/*
 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
 */
static enum vxge_hw_status
__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
                  u32 action, u32 rts_table, u32 offset,
                  u64 *data0, u64 *data1)
{
    enum vxge_hw_status status;
    u64 steer_ctrl = 0;

    if (vp == NULL) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }

    if ((rts_table ==
         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
        (rts_table ==
         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
        (rts_table ==
         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
        (rts_table ==
         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
        steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
    }

    status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
                      data0, data1, &steer_ctrl);
    if (status != VXGE_HW_OK)
        goto exit;

    /* data1 only carries payload for the DA and multi-IT tables */
    if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
        (rts_table !=
         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
        *data1 = 0;
exit:
    return status;
}
/*
 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
 */
static enum vxge_hw_status
__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
                  u32 rts_table, u32 offset, u64 steer_data0,
                  u64 steer_data1)
{
    u64 data0, data1 = 0, steer_ctrl = 0;
    enum vxge_hw_status status;

    if (vp == NULL) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }

    data0 = steer_data0;

    if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
        (rts_table ==
         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
        data1 = steer_data1;

    status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
                      &data0, &data1, &steer_ctrl);
exit:
    return status;
}
/*
 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
            struct __vxge_hw_vpath_handle *vp,
            enum vxge_hw_rth_algoritms algorithm,
            struct vxge_hw_rth_hash_types *hash_type,
            u16 bucket_size)
{
    u64 data0, data1;
    enum vxge_hw_status status = VXGE_HW_OK;

    if (vp == NULL) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }

    status = __vxge_hw_vpath_rts_table_get(vp,
             VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
             VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
             0, &data0, &data1);
    if (status != VXGE_HW_OK)
        goto exit;

    data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));

    data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
    VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
    VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);

    if (hash_type->hash_type_tcpipv4_en)
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;

    if (hash_type->hash_type_ipv4_en)
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;

    if (hash_type->hash_type_tcpipv6_en)
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;

    if (hash_type->hash_type_ipv6_en)
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;

    if (hash_type->hash_type_tcpipv6ex_en)
        data0 |=
        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;

    if (hash_type->hash_type_ipv6ex_en)
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;

    if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
        data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
    else
        data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;

    status = __vxge_hw_vpath_rts_table_set(vp,
        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
        0, data0, 0);
exit:
    return status;
}
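/*
 * Illustrative sketch (assuming the RTH_ALG_JENKINS enumerator from
 * vxge-traffic.h): enabling IPv4 and TCP/IPv4 hashing on an open vpath
 * handle with a 256-entry (2^8) hash:
 *
 *    struct vxge_hw_rth_hash_types hash = { 0 };
 *
 *    hash.hash_type_tcpipv4_en = 1;
 *    hash.hash_type_ipv4_en = 1;
 *    status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &hash, 8);
 */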
static void
vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
                u16 flag, u8 *itable)
{
    /* flag selects which of the four item slots to fill for bucket j */
    switch (flag) {
    case 1:
        *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
            itable[j]);
        break;
    case 2:
        *data0 |=
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
            itable[j]);
        break;
    case 3:
        *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
            VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
            VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
            itable[j]);
        break;
    default:
        *data1 |=
            VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
            VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
            VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
            itable[j]);
        break;
    }
}
/*
 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
 */
enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
            struct __vxge_hw_vpath_handle **vpath_handles,
            u32 vpath_count,
            u8 *mtable,
            u8 *itable,
            u32 itable_size)
{
    u32 i, j, action, rts_table;
    u64 data0;
    u64 data1;
    u32 max_entries;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_vpath_handle *vp = vpath_handles[0];

    if (vp == NULL) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }

    max_entries = (((u32)1) << itable_size);

    if (vp->vpath->hldev->config.rth_it_type
                == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
        action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
        rts_table =
            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;

        for (j = 0; j < max_entries; j++) {

            data1 = 0;

            data0 =
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
                itable[j]);

            status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
                action, rts_table, j, data0, data1);

            if (status != VXGE_HW_OK)
                goto exit;
        }

        for (j = 0; j < max_entries; j++) {

            data1 = 0;

            data0 =
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
            VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
                itable[j]);

            status = __vxge_hw_vpath_rts_table_set(
                vpath_handles[mtable[itable[j]]], action,
                rts_table, j, data0, data1);

            if (status != VXGE_HW_OK)
                goto exit;
        }
    } else {
        action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
        rts_table =
            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
        for (i = 0; i < vpath_count; i++) {

            for (j = 0; j < max_entries;) {

                data0 = 0;
                data1 = 0;

                while (j < max_entries) {
                    if (mtable[itable[j]] != i) {
                        j++;
                        continue;
                    }
                    vxge_hw_rts_rth_data0_data1_get(j,
                        &data0, &data1, 1, itable);
                    j++;
                    break;
                }

                while (j < max_entries) {
                    if (mtable[itable[j]] != i) {
                        j++;
                        continue;
                    }
                    vxge_hw_rts_rth_data0_data1_get(j,
                        &data0, &data1, 2, itable);
                    j++;
                    break;
                }

                while (j < max_entries) {
                    if (mtable[itable[j]] != i) {
                        j++;
                        continue;
                    }
                    vxge_hw_rts_rth_data0_data1_get(j,
                        &data0, &data1, 3, itable);
                    j++;
                    break;
                }

                while (j < max_entries) {
                    if (mtable[itable[j]] != i) {
                        j++;
                        continue;
                    }
                    vxge_hw_rts_rth_data0_data1_get(j,
                        &data0, &data1, 4, itable);
                    j++;
                    break;
                }

                if (data0 != 0) {
                    status = __vxge_hw_vpath_rts_table_set(
                            vpath_handles[i],
                            action, rts_table,
                            0, data0, data1);

                    if (status != VXGE_HW_OK)
                        goto exit;
                }
            }
        }
    }
exit:
    return status;
}
/*
 * vxge_hw_vpath_check_leak - Check for memory leak
 * @ring: Handle to the ring object used for receive
 *
 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to
 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
 * Returns: VXGE_HW_FAIL, if leak has occurred.
 */
enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
{
    enum vxge_hw_status status = VXGE_HW_OK;
    u64 rxd_new_count, rxd_spat;

    if (ring == NULL)
        return status;

    rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
    rxd_spat = readq(&ring->vp_reg->prc_cfg6);
    rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);

    if (rxd_new_count >= rxd_spat)
        status = VXGE_HW_FAIL;

    return status;
}
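/*
 * Illustrative sketch (assumption about the polling context): a driver
 * would typically run this check from a watchdog or poll path and
 * schedule a vpath reset when it fails, e.g.:
 *
 *    if (vxge_hw_vpath_check_leak(ring) == VXGE_HW_FAIL)
 *        vxge_hw_vpath_reset(vp);   (reclaim the leaked RxDs)
 */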
/*
 * __vxge_hw_vpath_mgmt_read
 * This routine reads the vpath_mgmt registers
 */
static enum vxge_hw_status
__vxge_hw_vpath_mgmt_read(
    struct __vxge_hw_device *hldev,
    struct __vxge_hw_virtualpath *vpath)
{
    u32 i, mtu = 0, max_pyld = 0;
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;

    for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

        val64 = readq(&vpath->vpmgmt_reg->
                rxmac_cfg0_port_vpmgmt_clone[i]);
        max_pyld =
            (u32)
            VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
            (val64);
        if (mtu < max_pyld)
            mtu = max_pyld;
    }

    vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;

    val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);

    for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
        if (val64 & vxge_mBIT(i))
            vpath->vsport_number = i;
    }

    val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);

    if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
        VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
    else
        VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);

    return status;
}
/*
 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
 * This routine checks the vpath_rst_in_prog register to see if the
 * adapter completed the reset process for the vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
    enum vxge_hw_status status;

    status = __vxge_hw_device_register_poll(
            &vpath->hldev->common_reg->vpath_rst_in_prog,
            VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
                1 << (16 - vpath->vp_id)),
            vpath->hldev->config.device_poll_millis);

    return status;
}
/*
 * __vxge_hw_vpath_reset
 * This routine resets the vpath on the device
 */
static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;

    val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));

    __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                &hldev->common_reg->cmn_rsthdlr_cfg0);

    return status;
}
/*
 * __vxge_hw_vpath_sw_reset
 * This routine resets the vpath structures
 */
static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;

    vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];

    if (vpath->ringh) {
        status = __vxge_hw_ring_reset(vpath->ringh);
        if (status != VXGE_HW_OK)
            goto exit;
    }

    if (vpath->fifoh)
        status = __vxge_hw_fifo_reset(vpath->fifoh);
exit:
    return status;
}
/*
 * __vxge_hw_vpath_prc_configure
 * This routine configures the prc registers of virtual path using the config
 * passed
 */
static void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vp_config *vp_config;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    vpath = &hldev->virtual_paths[vp_id];
    vp_reg = vpath->vp_reg;
    vp_config = vpath->vp_config;

    if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
        return;

    val64 = readq(&vp_reg->prc_cfg1);
    val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
    writeq(val64, &vp_reg->prc_cfg1);

    val64 = readq(&vpath->vp_reg->prc_cfg6);
    val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
    writeq(val64, &vpath->vp_reg->prc_cfg6);

    val64 = readq(&vp_reg->prc_cfg7);

    if (vpath->vp_config->ring.scatter_mode !=
        VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {

        val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);

        switch (vpath->vp_config->ring.scatter_mode) {
        case VXGE_HW_RING_SCATTER_MODE_A:
            val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
                    VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
            break;
        case VXGE_HW_RING_SCATTER_MODE_B:
            val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
                    VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
            break;
        case VXGE_HW_RING_SCATTER_MODE_C:
            val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
                    VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
            break;
        }
    }

    writeq(val64, &vp_reg->prc_cfg7);

    writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
        __vxge_hw_ring_first_block_address_get(
            vpath->ringh) >> 3), &vp_reg->prc_cfg5);

    val64 = readq(&vp_reg->prc_cfg4);
    val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
    val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);

    val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
            VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);

    if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
        val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
    else
        val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;

    writeq(val64, &vp_reg->prc_cfg4);
}
/*
 * __vxge_hw_vpath_kdfc_configure
 * This routine configures the kdfc registers of virtual path using the
 * config passed
 */
static enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    u64 vpath_stride;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    vpath = &hldev->virtual_paths[vp_id];
    vp_reg = vpath->vp_reg;
    status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);

    if (status != VXGE_HW_OK)
        goto exit;

    val64 = readq(&vp_reg->kdfc_drbl_triplet_total);

    vpath->max_kdfc_db =
        (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
            val64+1)/2;

    if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

        vpath->max_nofl_db = vpath->max_kdfc_db;

        if (vpath->max_nofl_db <
            ((vpath->vp_config->fifo.memblock_size /
            (vpath->vp_config->fifo.max_frags *
            sizeof(struct vxge_hw_fifo_txd))) *
            vpath->vp_config->fifo.fifo_blocks)) {

            return VXGE_HW_BADCFG_FIFO_BLOCKS;
        }
        val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
                (vpath->max_nofl_db*2)-1);
    }

    writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);

    writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
        &vp_reg->kdfc_fifo_trpl_ctrl);

    val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);

    val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
           VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));

    val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
         VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
#ifndef __BIG_ENDIAN
         VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
#endif
         VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);

    writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
    writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
    wmb();
    vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);

    vpath->nofl_db =
        (struct __vxge_hw_non_offload_db_wrapper __iomem *)
        (hldev->kdfc + (vp_id *
        VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
                    vpath_stride)));
exit:
    return status;
}
/*
 * __vxge_hw_vpath_mac_configure
 * This routine configures the mac of virtual path using the config passed
 */
static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vp_config *vp_config;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    vpath = &hldev->virtual_paths[vp_id];
    vp_reg = vpath->vp_reg;
    vp_config = vpath->vp_config;

    writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
            vpath->vsport_number), &vp_reg->xmac_vsport_choice);

    if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {

        val64 = readq(&vp_reg->xmac_rpa_vcfg);

        if (vp_config->rpa_strip_vlan_tag !=
            VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
            if (vp_config->rpa_strip_vlan_tag)
                val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
            else
                val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
        }

        writeq(val64, &vp_reg->xmac_rpa_vcfg);
        val64 = readq(&vp_reg->rxmac_vcfg0);

        if (vp_config->mtu !=
                VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
            val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
            if ((vp_config->mtu +
                VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
                val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
                    vp_config->mtu +
                    VXGE_HW_MAC_HEADER_MAX_SIZE);
            else
                val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
                    vpath->max_mtu);
        }

        writeq(val64, &vp_reg->rxmac_vcfg0);

        val64 = readq(&vp_reg->rxmac_vcfg1);

        val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
            VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);

        if (hldev->config.rth_it_type ==
                VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
            val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
                0x2) |
                VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
        }

        writeq(val64, &vp_reg->rxmac_vcfg1);
    }
    return status;
}
/*
 * __vxge_hw_vpath_tim_configure
 * This routine configures the tim registers of virtual path using the config
 * passed
 */
static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vpath_reg __iomem *vp_reg;
    struct vxge_hw_vp_config *config;

    vpath = &hldev->virtual_paths[vp_id];
    vp_reg = vpath->vp_reg;
    config = vpath->vp_config;

    writeq((u64)0, &vp_reg->tim_dest_addr);
    writeq((u64)0, &vp_reg->tim_vpath_map);
    writeq((u64)0, &vp_reg->tim_bitmap);
    writeq((u64)0, &vp_reg->tim_remap);

    if (config->ring.enable == VXGE_HW_RING_ENABLE)
        writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
            (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
            VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);

    val64 = readq(&vp_reg->tim_pci_cfg);
    val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
    writeq(val64, &vp_reg->tim_pci_cfg);

    if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {

        val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

        if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                config->tti.btimer_val);
        }

        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

        if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->tti.timer_ac_en)
                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
            else
                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
        }

        if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->tti.timer_ci_en)
                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
            else
                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
        }

        if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
                config->tti.urange_a);
        }

        if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
                config->tti.urange_b);
        }

        if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
                config->tti.urange_c);
        }

        writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
        val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);

        if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
                config->tti.uec_a);
        }

        if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
                config->tti.uec_b);
        }

        if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
                config->tti.uec_c);
        }

        if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
                config->tti.uec_d);
        }

        writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
        val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);

        if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->tti.timer_ri_en)
                val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
            else
                val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
        }

        if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                config->tti.rtimer_val);
        }

        if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
                config->tti.util_sel);
        }

        if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                config->tti.ltimer_val);
        }

        writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
    }

    if (config->ring.enable == VXGE_HW_RING_ENABLE) {

        val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);

        if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                config->rti.btimer_val);
        }

        val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;

        if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->rti.timer_ac_en)
                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
            else
                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
        }

        if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->rti.timer_ci_en)
                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
            else
                val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
        }

        if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
                config->rti.urange_a);
        }

        if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
                config->rti.urange_b);
        }

        if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
                config->rti.urange_c);
        }

        writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
        val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);

        if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
                config->rti.uec_a);
        }

        if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
                config->rti.uec_b);
        }

        if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
                config->rti.uec_c);
        }

        if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
            val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
                config->rti.uec_d);
        }

        writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
        val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);

        if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
            if (config->rti.timer_ri_en)
                val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
            else
                val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
        }

        if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
                config->rti.rtimer_val);
        }

        if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
                config->rti.util_sel);
        }

        if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
            val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                0x3ffffff);
            val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                config->rti.ltimer_val);
        }

        writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
    }

    val64 = 0;
    writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
    writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
    writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
    writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
    writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
    writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);

    return status;
}
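/*
 * Configuration sketch (hypothetical values): the TIM programming above is
 * driven entirely by the tti/rti members of struct vxge_hw_vp_config, so
 * interrupt moderation is tuned before vpath open, e.g.:
 *
 *    cfg.vp_config[0].rti.timer_ac_en = 1;     (auto-cancel)
 *    cfg.vp_config[0].rti.btimer_val = 250;    (base timer, device units)
 *
 * Any field left at VXGE_HW_USE_FLASH_DEFAULT keeps the flash value.
 */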
/*
 * vxge_hw_vpath_tti_ci_set - Force-enable the transmit timer's
 * continuous-interrupt (CI) bit for the given vpath.
 */
void
vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
{
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vpath_reg __iomem *vp_reg;
    struct vxge_hw_vp_config *config;
    u64 val64;

    vpath = &hldev->virtual_paths[vp_id];
    vp_reg = vpath->vp_reg;
    config = vpath->vp_config;

    if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
        val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

        if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
            config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
            val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
            writeq(val64,
            &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
        }
    }
}
/*
 * __vxge_hw_vpath_initialize
 * This routine is the final phase of init which initializes the
 * registers of the vpath using the configuration passed.
 */
static enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
    u64 val64;
    u32 val32;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;
    struct vxge_hw_vpath_reg __iomem *vp_reg;

    vpath = &hldev->virtual_paths[vp_id];

    if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
        status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
        goto exit;
    }
    vp_reg = vpath->vp_reg;

    status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
    if (status != VXGE_HW_OK)
        goto exit;

    status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
    if (status != VXGE_HW_OK)
        goto exit;

    status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
    if (status != VXGE_HW_OK)
        goto exit;

    status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
    if (status != VXGE_HW_OK)
        goto exit;

    val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);

    /* Get MRRS value from device control */
    status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
    if (status == VXGE_HW_OK) {
        val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
        val64 &=
            ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
        val64 |=
            VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);

        val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
    }

    val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
    val64 |=
        VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
            VXGE_HW_MAX_PAYLOAD_SIZE_512);

    val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
    writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);

exit:
    return status;
}
/*
 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
 * This routine is the initial phase of init which resets the vpath and
 * initializes the software support structures.
 */
static enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
            struct vxge_hw_vp_config *config)
{
    struct __vxge_hw_virtualpath *vpath;
    enum vxge_hw_status status = VXGE_HW_OK;

    if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
        status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
        goto exit;
    }

    vpath = &hldev->virtual_paths[vp_id];

    spin_lock_init(&hldev->virtual_paths[vp_id].lock);
    vpath->vp_id = vp_id;
    vpath->vp_open = VXGE_HW_VP_OPEN;
    vpath->hldev = hldev;
    vpath->vp_config = config;
    vpath->vp_reg = hldev->vpath_reg[vp_id];
    vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];

    __vxge_hw_vpath_reset(hldev, vp_id);

    status = __vxge_hw_vpath_reset_check(vpath);
    if (status != VXGE_HW_OK) {
        memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
        goto exit;
    }

    status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
    if (status != VXGE_HW_OK) {
        memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
        goto exit;
    }

    INIT_LIST_HEAD(&vpath->vpath_handles);

    vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];

    VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
        hldev->tim_int_mask1, vp_id);

    status = __vxge_hw_vpath_initialize(hldev, vp_id);
    if (status != VXGE_HW_OK)
        __vxge_hw_vp_terminate(hldev, vp_id);
exit:
    return status;
}
/*
 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 * This routine closes all channels it opened and frees up the memory
 */
static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
    struct __vxge_hw_virtualpath *vpath;

    vpath = &hldev->virtual_paths[vp_id];

    if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
        goto exit;

    VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
        vpath->hldev->tim_int_mask1, vpath->vp_id);
    hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;

    memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
exit:
    return;
}
/*
 * vxge_hw_vpath_mtu_set - Set MTU.
 * Set new MTU value. Example, to use jumbo frames:
 * vxge_hw_vpath_mtu_set(my_device, 9600);
 */
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
{
    u64 val64;
    enum vxge_hw_status status = VXGE_HW_OK;
    struct __vxge_hw_virtualpath *vpath;

    if (vp == NULL) {
        status = VXGE_HW_ERR_INVALID_HANDLE;
        goto exit;
    }
    vpath = vp->vpath;

    new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;

    if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
        status = VXGE_HW_ERR_INVALID_MTU_SIZE;
        goto exit;
    }

    val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

    val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
    val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);

    writeq(val64, &vpath->vp_reg->rxmac_vcfg0);

    vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;

exit:
    return status;
}
/*
 * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to virtual path of an
 * adapter for offload, GRO operations. This function returns
 * synchronously.
 */
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
           struct vxge_hw_vpath_attr *attr,
           struct __vxge_hw_vpath_handle **vpath_handle)
{
    struct __vxge_hw_virtualpath *vpath;
    struct __vxge_hw_vpath_handle *vp;
    enum vxge_hw_status status;

    vpath = &hldev->virtual_paths[attr->vp_id];

    if (vpath->vp_open == VXGE_HW_VP_OPEN) {
        status = VXGE_HW_ERR_INVALID_STATE;
        goto vpath_open_exit1;
    }

    status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
            &hldev->config.vp_config[attr->vp_id]);

    if (status != VXGE_HW_OK)
        goto vpath_open_exit1;

    vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
    if (vp == NULL) {
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        goto vpath_open_exit2;
    }

    vp->vpath = vpath;

    if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
        status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
        if (status != VXGE_HW_OK)
            goto vpath_open_exit6;
    }

    if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
        status = __vxge_hw_ring_create(vp, &attr->ring_attr);
        if (status != VXGE_HW_OK)
            goto vpath_open_exit7;

        __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
    }

    vpath->fifoh->tx_intr_num =
        (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
            VXGE_HW_VPATH_INTR_TX;

    vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
                VXGE_HW_BLOCK_SIZE);

    if (vpath->stats_block == NULL) {
        status = VXGE_HW_ERR_OUT_OF_MEMORY;
        goto vpath_open_exit8;
    }

    vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
            stats_block->memblock;
    memset(vpath->hw_stats, 0,
        sizeof(struct vxge_hw_vpath_stats_hw_info));

    hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
                        vpath->hw_stats;

    vpath->hw_stats_sav =
        &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
    memset(vpath->hw_stats_sav, 0,
            sizeof(struct vxge_hw_vpath_stats_hw_info));

    writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);

    status = vxge_hw_vpath_stats_enable(vp);
    if (status != VXGE_HW_OK)
        goto vpath_open_exit8;

    list_add(&vp->item, &vpath->vpath_handles);

    hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);

    *vpath_handle = vp;

    attr->fifo_attr.userdata = vpath->fifoh;
    attr->ring_attr.userdata = vpath->ringh;

    return VXGE_HW_OK;

vpath_open_exit8:
    if (vpath->ringh != NULL)
        __vxge_hw_ring_delete(vp);
vpath_open_exit7:
    if (vpath->fifoh != NULL)
        __vxge_hw_fifo_delete(vp);
vpath_open_exit6:
    vfree(vp);
vpath_open_exit2:
    __vxge_hw_vp_terminate(hldev, attr->vp_id);
vpath_open_exit1:

    return status;
}
/**
 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell for a vpath
 * @vp: Handle got from previous vpath open
 *
 * This function posts the count of receive descriptor qwords initially
 * available to the PRC and scales the ring's rxds_limit down to what the
 * hardware can actually buffer.
 */
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct __vxge_hw_ring *ring = vpath->ringh;
	struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
	u64 new_count, val64, val164;

	if (vdev->titan1) {
		new_count = readq(&vpath->vp_reg->rxdmem_size);
		new_count &= 0x1fff;
	} else
		new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;

	val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);

	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
		&vpath->vp_reg->prc_rxd_doorbell);
	readl(&vpath->vp_reg->prc_rxd_doorbell);

	val164 /= 2;
	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
	val64 &= 0x1ff;

	/*
	 * Each RxD is of 4 qwords
	 */
	new_count -= (val64 + 1);
	val64 = min(val164, new_count) / 4;

	ring->rxds_limit = min(ring->rxds_limit, val64);
	if (ring->rxds_limit < 4)
		ring->rxds_limit = 4;
}
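
/*
 * Worked example for the arithmetic above (assumed values, for illustration
 * only): taking VXGE_HW_BLOCK_SIZE as 4096 and ring_blocks as 2, the
 * non-Titan1 path yields new_count = 2 * 4096 / 8 = 1024 qwords. With an
 * RXD_SPAT threshold of 255, new_count becomes 1024 - 256 = 768 qwords;
 * val164 was halved from 1024 to 512, and since each RxD is 4 qwords the
 * limit candidate is min(512, 768) / 4 = 128 RxDs.
 */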
/**
 * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
 * This function is used to close access to virtual path opened
 * earlier.
 */
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	struct __vxge_hw_device *devh = NULL;
	u32 vp_id = vp->vpath->vp_id;
	u32 is_empty = TRUE;
	enum vxge_hw_status status = VXGE_HW_OK;

	vpath = vp->vpath;
	devh = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_close_exit;
	}

	list_del(&vp->item);

	if (!list_empty(&vpath->vpath_handles)) {
		list_add(&vp->item, &vpath->vpath_handles);
		is_empty = FALSE;
	}

	if (!is_empty) {
		status = VXGE_HW_FAIL;
		goto vpath_close_exit;
	}

	devh->vpaths_deployed &= ~vxge_mBIT(vp_id);

	if (vpath->ringh != NULL)
		__vxge_hw_ring_delete(vp);

	if (vpath->fifoh != NULL)
		__vxge_hw_fifo_delete(vp);

	if (vpath->stats_block != NULL)
		__vxge_hw_blockpool_block_free(devh, vpath->stats_block);

	vfree(vp);

	__vxge_hw_vp_terminate(devh, vp_id);

	spin_lock(&vpath->lock);
	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
	spin_unlock(&vpath->lock);

vpath_close_exit:
	return status;
}
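
/*
 * Note, with a sketch (illustrative only): when several handles reference
 * the same vpath, close only tears the path down for the last handle.
 * Earlier closes re-queue the handle and return VXGE_HW_FAIL:
 *
 *	status = vxge_hw_vpath_close(vp);
 *	if (status == VXGE_HW_FAIL)
 *		;	(other handles still hold this vpath open)
 */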
/**
 * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of vpath
 */
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status;
	u32 vp_id;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;

	vp_id = vpath->vp_id;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
	if (status == VXGE_HW_OK)
		vpath->sw_stats->soft_reset_cnt++;
exit:
	return status;
}
/**
 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and
 * re-initialize.
 * This function polls for the vpath reset completion and re-initializes
 * the vpath.
 */
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath = NULL;
	enum vxge_hw_status status;
	struct __vxge_hw_device *hldev;
	u32 vp_id;

	vp_id = vp->vpath->vp_id;
	vpath = vp->vpath;
	hldev = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	if (vpath->ringh != NULL)
		__vxge_hw_vpath_prc_configure(hldev, vp_id);

	memset(vpath->hw_stats, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	memset(vpath->hw_stats_sav, 0,
		sizeof(struct vxge_hw_vpath_stats_hw_info));

	writeq(vpath->stats_block->dma_addr,
		&vpath->vp_reg->stats_cfg);

	status = vxge_hw_vpath_stats_enable(vp);

exit:
	return status;
}
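
/*
 * Recovery sketch (illustrative only): a caller typically pairs the two
 * entry points above, requesting the reset and then polling for its
 * completion before restarting traffic:
 *
 *	if (vxge_hw_vpath_reset(vp) == VXGE_HW_OK &&
 *	    vxge_hw_vpath_recover_from_reset(vp) == VXGE_HW_OK)
 *		vxge_hw_vpath_enable(vp);
 */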
/**
 * vxge_hw_vpath_enable - Enable vpath.
 * This routine clears the vpath reset thereby enabling a vpath
 * to start forwarding frames and generating interrupts.
 */
void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_device *hldev;
	u64 val64;

	hldev = vp->vpath->hldev;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
		1 << (16 - vp->vpath->vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
		&hldev->common_reg->cmn_rsthdlr_cfg1);
}
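
/*
 * Example of the bit computation above (illustrative only): for vp_id = 3
 * the shift produces 1 << (16 - 3) = 0x2000, which CLR_VPATH_RESET then
 * positions in the reset-handler register. The field apparently lives in
 * the most-significant half of the 64-bit value, hence the single
 * upper-half write of vxge_bVALn(val64, 0, 32).
 */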
/*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. The function is to be called to re-enable
 * the adapter to update stats into the host memory.
 */
static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	memcpy(vpath->hw_stats_sav, vpath->hw_stats,
			sizeof(struct vxge_hw_vpath_stats_hw_info));

	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
exit:
	return status;
}
/*
 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
 *                                and offset and perform an operation
 */
static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
			     u32 operation, u32 offset, u64 *stat)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_stats_access_exit;
	}

	vp_reg = vpath->vp_reg;

	val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
		VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
		VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);

	status = __vxge_hw_pio_mem_write64(val64,
				&vp_reg->xmac_stats_access_cmd,
				VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
				vpath->hldev->config.device_poll_millis);

	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
		*stat = readq(&vp_reg->xmac_stats_access_data);
	else
		*stat = 0;

vpath_stats_access_exit:
	return status;
}
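
/*
 * Example (illustrative only): reading a single statistics qword through
 * the helper above. The caller picks the operation and offset, and the
 * result lands in *stat only for VXGE_HW_STATS_OP_READ:
 *
 *	u64 stat;
 *
 *	status = __vxge_hw_vpath_stats_access(vpath, VXGE_HW_STATS_OP_READ,
 *			VXGE_HW_STATS_VPATH_TX_OFFSET, &stat);
 */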
/*
 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(
	struct __vxge_hw_virtualpath *vpath,
	struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
{
	u64 *val64;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = (u64 *) vpath_tx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset, val64);
		if (status != VXGE_HW_OK)
			goto exit;
		offset++;
		val64++;
	}
exit:
	return status;
}
/*
 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
 */
static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
	u64 *val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i;
	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;

	val64 = (u64 *) vpath_rx_stats;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
		status = __vxge_hw_vpath_stats_access(vpath,
					VXGE_HW_STATS_OP_READ,
					offset >> 3, val64);
		if (status != VXGE_HW_OK)
			goto exit;

		offset += 8;
		val64++;
	}
exit:
	return status;
}
/*
 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
 */
static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
			struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	val64 = readq(&vp_reg->vpath_debug_stats0);
	hw_stats->ini_num_mwr_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats1);
	hw_stats->ini_num_mrd_sent =
		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats2);
	hw_stats->ini_num_cpl_rcvd =
		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats3);
	hw_stats->ini_num_mwr_byte_sent =
		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);

	val64 = readq(&vp_reg->vpath_debug_stats4);
	hw_stats->ini_num_cpl_byte_rcvd =
		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);

	val64 = readq(&vp_reg->vpath_debug_stats5);
	hw_stats->wrcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_debug_stats6);
	hw_stats->rdcrdtarb_xoff =
		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count0 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count01);
	hw_stats->vpath_genstats_count1 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count23);
	hw_stats->vpath_genstats_count2 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count23);
	hw_stats->vpath_genstats_count3 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count4);
	hw_stats->vpath_genstats_count4 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
		val64);

	val64 = readq(&vp_reg->vpath_genstats_count5);
	hw_stats->vpath_genstats_count5 =
	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
		val64);

	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
	if (status != VXGE_HW_OK)
		goto exit;

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);

	hw_stats->prog_event_vnum0 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);

	hw_stats->prog_event_vnum1 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);

	VXGE_HW_VPATH_STATS_PIO_READ(
		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);

	hw_stats->prog_event_vnum2 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);

	hw_stats->prog_event_vnum3 =
			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);

	val64 = readq(&vp_reg->rx_multi_cast_stats);
	hw_stats->rx_multi_cast_frame_discard =
		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);

	val64 = readq(&vp_reg->rx_frm_transferred);
	hw_stats->rx_frm_transferred =
		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);

	val64 = readq(&vp_reg->rxd_returned);
	hw_stats->rxd_returned =
		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
	hw_stats->rx_mpa_len_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
	hw_stats->rx_mpa_mrk_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
	hw_stats->rx_mpa_crc_fail_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);

	val64 = readq(&vp_reg->dbg_stats_rx_fau);
	hw_stats->rx_permitted_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
	hw_stats->rx_vp_reset_discarded_frms =
	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
	hw_stats->rx_wol_frms =
		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);

	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
	hw_stats->tx_vp_reset_discarded_frms =
	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
		val64);
exit:
	return status;
}
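
/*
 * Sketch of how the two statistics blocks are meant to be consumed
 * (illustrative only): vxge_hw_vpath_stats_enable() snapshots hw_stats into
 * hw_stats_sav before this function refreshes hw_stats in place, so a
 * caller can diff the blocks for per-interval counters, e.g.:
 *
 *	u32 mwr_delta = vpath->hw_stats->ini_num_mwr_sent -
 *			vpath->hw_stats_sav->ini_num_mwr_sent;
 */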
/*
 * vxge_os_dma_malloc_async - Allocate a block asynchronously and hand it to
 * the block pool through the vxge_hw_blockpool_block_add() callback.
 */
static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
					unsigned long size)
{
	gfp_t flags;
	void *vaddr;

	if (in_interrupt())
		flags = GFP_ATOMIC | GFP_DMA;
	else
		flags = GFP_KERNEL | GFP_DMA;

	vaddr = kmalloc(size, flags);

	vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
}
static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
			     struct pci_dev **p_dma_acch)
{
	unsigned long misaligned = *(unsigned long *)p_dma_acch;
	u8 *tmp = (u8 *)vaddr;

	tmp -= misaligned;
	kfree((void *)tmp);
}
/*
 * __vxge_hw_blockpool_create - Create block pool
 */
static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
			   struct __vxge_hw_blockpool *blockpool,
			   u32 pool_size,
			   u32 pool_max)
{
	u32 i;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	void *memblock;
	dma_addr_t dma_addr;
	struct pci_dev *dma_handle;
	struct pci_dev *acc_handle;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (blockpool == NULL) {
		status = VXGE_HW_FAIL;
		goto blockpool_create_exit;
	}

	blockpool->hldev = hldev;
	blockpool->block_size = VXGE_HW_BLOCK_SIZE;
	blockpool->pool_size = 0;
	blockpool->pool_max = pool_max;
	blockpool->req_out = 0;

	INIT_LIST_HEAD(&blockpool->free_block_list);
	INIT_LIST_HEAD(&blockpool->free_entry_list);

	for (i = 0; i < pool_size + pool_max; i++) {
		entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
				GFP_KERNEL);
		if (entry == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
		list_add(&entry->item, &blockpool->free_entry_list);
	}

	for (i = 0; i < pool_size; i++) {
		memblock = vxge_os_dma_malloc(
				hldev->pdev,
				VXGE_HW_BLOCK_SIZE,
				&dma_handle,
				&acc_handle);
		if (memblock == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		dma_addr = pci_map_single(hldev->pdev, memblock,
				VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);

		if (unlikely(pci_dma_mapping_error(hldev->pdev,
				dma_addr))) {
			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry =
			    kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
				    GFP_KERNEL);
		if (entry != NULL) {
			list_del(&entry->item);
			entry->length = VXGE_HW_BLOCK_SIZE;
			entry->memblock = memblock;
			entry->dma_addr = dma_addr;
			entry->acc_handle = acc_handle;
			entry->dma_handle = dma_handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
		} else {
			__vxge_hw_blockpool_destroy(blockpool);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto blockpool_create_exit;
		}
	}

blockpool_create_exit:
	return status;
}
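
/*
 * Sizing sketch (illustrative only): the pool pre-allocates pool_size
 * DMA-mapped blocks plus pool_size + pool_max spare list entries, so a
 * create call such as
 *
 *	status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
 *			pool_size, pool_max);
 *
 * leaves pool_size blocks on free_block_list and fails the whole create if
 * any single allocation or DMA mapping fails.
 */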
/*
 * __vxge_hw_blockpool_destroy - Deallocates the block pool
 */
static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
	struct __vxge_hw_device *hldev;
	struct list_head *p, *n;

	if (blockpool == NULL)
		return;

	hldev = blockpool->hldev;

	list_for_each_safe(p, n, &blockpool->free_block_list) {
		pci_unmap_single(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(hldev->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *) p)->acc_handle);

		list_del(
			&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree(p);
		blockpool->pool_size--;
	}

	list_for_each_safe(p, n, &blockpool->free_entry_list) {
		list_del(
			&((struct __vxge_hw_blockpool_entry *)p)->item);
		kfree(p);
	}
}
/*
 * __vxge_hw_blockpool_blocks_add - Request additional blocks
 */
static
void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
{
	u32 nreq = 0, i;

	if ((blockpool->pool_size + blockpool->req_out) <
		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
		blockpool->req_out += nreq;
	}

	for (i = 0; i < nreq; i++)
		vxge_os_dma_malloc_async(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}
/*
 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
 */
static
void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &blockpool->free_block_list) {

		if (blockpool->pool_size < blockpool->pool_max)
			break;

		pci_unmap_single(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
			((struct __vxge_hw_blockpool_entry *)p)->length,
			PCI_DMA_BIDIRECTIONAL);

		vxge_os_dma_free(
			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
			((struct __vxge_hw_blockpool_entry *)p)->memblock,
			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);

		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);

		list_add(p, &blockpool->free_entry_list);

		blockpool->pool_size--;
	}
}
/*
 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
 * Adds a block to block pool
 */
static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
					void *block_addr,
					u32 length,
					struct pci_dev *dma_h,
					struct pci_dev *acc_handle)
{
	struct __vxge_hw_blockpool *blockpool;
	struct __vxge_hw_blockpool_entry *entry = NULL;
	dma_addr_t dma_addr;
	enum vxge_hw_status status = VXGE_HW_OK;
	u32 req_out;

	blockpool = &devh->block_pool;

	if (block_addr == NULL) {
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	dma_addr = pci_map_single(devh->pdev, block_addr, length,
				PCI_DMA_BIDIRECTIONAL);

	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
		blockpool->req_out--;
		status = VXGE_HW_FAIL;
		goto exit;
	}

	if (!list_empty(&blockpool->free_entry_list))
		entry = (struct __vxge_hw_blockpool_entry *)
			list_first_entry(&blockpool->free_entry_list,
				struct __vxge_hw_blockpool_entry,
				item);

	if (entry == NULL)
		entry = (struct __vxge_hw_blockpool_entry *)
			vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
	else
		list_del(&entry->item);

	if (entry != NULL) {
		entry->length = length;
		entry->memblock = block_addr;
		entry->dma_addr = dma_addr;
		entry->acc_handle = acc_handle;
		entry->dma_handle = dma_h;
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
		status = VXGE_HW_OK;
	} else
		status = VXGE_HW_ERR_OUT_OF_MEMORY;

	blockpool->req_out--;

	req_out = blockpool->req_out;
exit:
	return;
}
/*
 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
 * Allocates a block of memory of given size, either from block pool
 * or by calling vxge_os_dma_malloc()
 */
static void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
				struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	void *memblock = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {

		memblock = vxge_os_dma_malloc(devh->pdev, size,
						&dma_object->handle,
						&dma_object->acc_handle);

		if (memblock == NULL) {
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto exit;
		}

		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
					PCI_DMA_BIDIRECTIONAL);

		if (unlikely(pci_dma_mapping_error(devh->pdev,
				dma_object->addr))) {
			vxge_os_dma_free(devh->pdev, memblock,
				&dma_object->acc_handle);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			memblock = NULL;
			goto exit;
		}

	} else {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			dma_object->addr = entry->dma_addr;
			dma_object->handle = entry->dma_handle;
			dma_object->acc_handle = entry->acc_handle;
			memblock = entry->memblock;

			list_add(&entry->item,
				&blockpool->free_entry_list);
			blockpool->pool_size--;
		}

		if (memblock != NULL)
			__vxge_hw_blockpool_blocks_add(blockpool);
	}
exit:
	return memblock;
}
/*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 *			      __vxge_hw_blockpool_malloc
 */
static void
__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
			 void *memblock, u32 size,
			 struct vxge_hw_mempool_dma *dma_object)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;
	enum vxge_hw_status status = VXGE_HW_OK;

	blockpool = &devh->block_pool;

	if (size != blockpool->block_size) {
		pci_unmap_single(devh->pdev, dma_object->addr, size,
			PCI_DMA_BIDIRECTIONAL);
		vxge_os_dma_free(devh->pdev, memblock,
			&dma_object->acc_handle);
	} else {

		if (!list_empty(&blockpool->free_entry_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_entry_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry == NULL)
			entry = (struct __vxge_hw_blockpool_entry *)
				vmalloc(sizeof(
					struct __vxge_hw_blockpool_entry));
		else
			list_del(&entry->item);

		if (entry != NULL) {
			entry->length = size;
			entry->memblock = memblock;
			entry->dma_addr = dma_object->addr;
			entry->acc_handle = dma_object->acc_handle;
			entry->dma_handle = dma_object->handle;
			list_add(&entry->item,
					&blockpool->free_block_list);
			blockpool->pool_size++;
			status = VXGE_HW_OK;
		} else
			status = VXGE_HW_ERR_OUT_OF_MEMORY;

		if (status == VXGE_HW_OK)
			__vxge_hw_blockpool_blocks_remove(blockpool);
	}
}
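
/*
 * Pairing sketch (illustrative only): a buffer taken with
 * __vxge_hw_blockpool_malloc() must be returned with the same size and
 * dma_object, since the non-pool-sized path unmaps and frees rather than
 * recycling:
 *
 *	struct vxge_hw_mempool_dma dma;
 *	void *buf = __vxge_hw_blockpool_malloc(devh, len, &dma);
 *
 *	if (buf != NULL)
 *		__vxge_hw_blockpool_free(devh, buf, len, &dma);
 */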
/*
 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
 * This function allocates a block from block pool or from the system
 */
static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
{
	struct __vxge_hw_blockpool_entry *entry = NULL;
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (size == blockpool->block_size) {

		if (!list_empty(&blockpool->free_block_list))
			entry = (struct __vxge_hw_blockpool_entry *)
				list_first_entry(&blockpool->free_block_list,
					struct __vxge_hw_blockpool_entry,
					item);

		if (entry != NULL) {
			list_del(&entry->item);
			blockpool->pool_size--;
		}
	}

	if (entry != NULL)
		__vxge_hw_blockpool_blocks_add(blockpool);

	return entry;
}
/*
 * __vxge_hw_blockpool_block_free - Frees a block from block pool
 * @devh: Hal device
 * @entry: Entry of block to be freed
 *
 * This function frees a block from block pool
 */
static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
			struct __vxge_hw_blockpool_entry *entry)
{
	struct __vxge_hw_blockpool *blockpool;

	blockpool = &devh->block_pool;

	if (entry->length == blockpool->block_size) {
		list_add(&entry->item, &blockpool->free_block_list);
		blockpool->pool_size++;
	}

	__vxge_hw_blockpool_blocks_remove(blockpool);
}