/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/etherdevice.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed the last in
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_status);

	val64 = readq(&vp_reg->vpath_general_int_status);

	/* Mask unwanted interrupts */

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	/* Unmask the individual interrupts */

	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
			&vp_reg->asic_ntwk_vp_err_mask);

	__vxge_hw_pio_mem_write32_upper(0,
		&vp_reg->vpath_general_int_mask);
exit:
	return status;
}
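/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * enables vpath interrupts only once the vpath is fully opened, and
 * disables them before tearing it down. "my_vp" is a hypothetical handle
 * obtained from vxge_hw_vpath_open().
 *
 *	if (vxge_hw_vpath_intr_enable(my_vp) != VXGE_HW_OK)
 *		goto teardown;
 *	...
 *	vxge_hw_vpath_intr_disable(my_vp);
 */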
/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed the last in
 * the vpath de-initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	__vxge_hw_pio_mem_write32_upper(
		(u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_general_int_mask);

	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
exit:
	return status;
}
/*
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->set_msix_mask_vect[msix_id % 4]);
}
/*
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
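/*
 * Note on the msix_id encoding above (a reading aid, not driver code):
 * each vpath appears to own four MSIX vectors, so (msix_id >> 2) selects
 * the per-vpath bit while (msix_id % 4) selects which of the four
 * set/clear_msix_mask_vect registers is written. For example, with
 * msix_id = 9:
 *
 *	u32 bit = (u32)vxge_bVALn(vxge_mBIT(9 >> 2), 0, 32); // vpath bit 2
 *	int reg = 9 % 4;                                     // register 1
 */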
/*
 * vxge_hw_device_set_intr_type - Updates the configuration
 *		with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	   (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	   (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	   (intr_mode != VXGE_HW_INTR_MODE_DEF))
		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

	hldev->config.intr_mode = intr_mode;

	return intr_mode;
}
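/*
 * Usage sketch (hypothetical caller): invalid modes silently fall back
 * to VXGE_HW_INTR_MODE_IRQLINE, so a module parameter can be passed
 * through unchecked and the sanitized value read back:
 *
 *	u32 mode = vxge_hw_device_set_intr_type(hldev, user_intr_mode);
 *	if (mode != user_intr_mode)
 *		vxge_debug_init(VXGE_ERR, "falling back to INTA");
 */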
/*
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed the last in
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			writeq(val64, &hldev->common_reg->tim_int_status0);

			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
					&hldev->common_reg->tim_int_status1);

			__vxge_hw_pio_mem_write32_upper(~val32,
					&hldev->common_reg->tim_int_mask1);
		}
	}

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}
/*
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}
}
/*
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->titan_mask_all_int);
}
/*
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
	u64 val64 = 0;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
		val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
			&hldev->common_reg->titan_mask_all_int);
}
/*
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
	u32 val32;

	val32 = readl(&hldev->common_reg->titan_general_int_status);
}
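/*
 * The read above is the standard posted-write flush: PCIe writes may sit
 * in bridge buffers until a read from the same device forces them out.
 * A hypothetical caller pattern (illustrative only):
 *
 *	writeq(val64, &vp_reg->prc_rxd_doorbell);
 *	vxge_hw_device_flush_io(hldev);	// doorbell now visible to the NIC
 */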
/*
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
			      enum vxge_hw_event type)
{
	switch (type) {
	case VXGE_HW_EVENT_UNKNOWN:
		break;
	case VXGE_HW_EVENT_RESET_START:
	case VXGE_HW_EVENT_RESET_COMPLETE:
	case VXGE_HW_EVENT_LINK_DOWN:
	case VXGE_HW_EVENT_LINK_UP:
		goto out;
	case VXGE_HW_EVENT_ALARM_CLEARED:
		goto out;
	case VXGE_HW_EVENT_ECCERR:
	case VXGE_HW_EVENT_MRPCIM_ECCERR:
		goto out;
	case VXGE_HW_EVENT_FIFO_ERR:
	case VXGE_HW_EVENT_VPATH_ERR:
	case VXGE_HW_EVENT_CRITICAL_ERR:
	case VXGE_HW_EVENT_SERR:
		break;
	case VXGE_HW_EVENT_SRPCIM_SERR:
	case VXGE_HW_EVENT_MRPCIM_SERR:
		goto out;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		break;
	default:
		vxge_assert(0);
		goto out;
	}

	/* notify driver */
	if (hldev->uld_callbacks.crit_err)
		hldev->uld_callbacks.crit_err(
			(struct __vxge_hw_device *)hldev,
			type, vp_id);
out:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the previous link state is already down, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_DOWN)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_DOWN;

	/* notify driver */
	if (hldev->uld_callbacks.link_down)
		hldev->uld_callbacks.link_down(hldev);
exit:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for programmable amount of time.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the previous link state is already up, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_UP)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_UP;

	/* notify driver */
	if (hldev->uld_callbacks.link_up)
		hldev->uld_callbacks.link_up(hldev);
exit:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 */
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
			      u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     && (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     && (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			))) {
				sw_stats->error_stats.network_sustained_ok++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
				~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				& ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				& ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}
/*
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *	general_int_status register.
 *
 * The function performs two actions, It first checks whether (shared IRQ) the
 * interrupt was raised by the device. Next, it masks the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns the 64bit general adapter
 * status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* not Titan interrupt */
		*reason = 0;
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {

			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			*reason = 0;
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

		return VXGE_HW_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}
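/*
 * ISR usage sketch (illustrative; "vdev" and the napi handling are
 * hypothetical). begin_irq() classifies the interrupt, after which a
 * caller typically masks, defers the work and acknowledges:
 *
 *	u64 reason;
 *
 *	if (vxge_hw_device_begin_irq(hldev, 0, &reason) != VXGE_HW_OK)
 *		return IRQ_NONE;	// shared line, not ours
 *
 *	vxge_hw_device_mask_all(hldev);
 *	napi_schedule(&vdev->napi);
 *	vxge_hw_device_clear_tx_rx(hldev);
 *	return IRQ_HANDLED;
 */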
/*
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and RX interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{

	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
				 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
				&hldev->common_reg->tim_int_status0);
	}

	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
				(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
				 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
				&hldev->common_reg->tim_int_status1);
	}
}
/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 */
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
	void **tmp_arr;

	if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
		*dtrh = channel->reserve_arr[--channel->reserve_ptr];

		return VXGE_HW_OK;
	}

	/* switch between empty and full arrays */

	/* the idea behind such a design is that by having free and reserved
	 * arrays separated we basically separated irq and non-irq parts.
	 * i.e. no additional lock need to be done when we free a resource */

	if (channel->length - channel->free_ptr > 0) {

		tmp_arr = channel->reserve_arr;
		channel->reserve_arr = channel->free_arr;
		channel->free_arr = tmp_arr;
		channel->reserve_ptr = channel->length;
		channel->reserve_top = channel->free_ptr;
		channel->free_ptr = channel->length;

		channel->stats->reserve_free_swaps_cnt++;

		goto _alloc_after_swap;
	}

	channel->stats->full_cnt++;

	*dtrh = NULL;
	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
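/*
 * Worked example of the two-array scheme above (illustrative values):
 * with length = 8, reserve_ptr = 0 (reserve array drained) and
 * free_ptr = 5 (three dtrs freed into free_arr[7..5]), the swap leaves
 * reserve_ptr = 8, reserve_top = 5 and free_ptr = 8, so exactly the
 * three freed dtrs (reserve_ptr - reserve_top) become allocatable again
 * without any producer/consumer locking.
 */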
/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channel: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to work array.
 */
void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
	vxge_assert(channel->work_arr[channel->post_index] == NULL);

	channel->work_arr[channel->post_index++] = dtrh;

	/* wrap-around */
	if (channel->post_index == channel->length)
		channel->post_index = 0;
}
/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtrh: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from work array
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
	vxge_assert(channel->compl_index < channel->length);

	*dtrh = channel->work_arr[channel->compl_index];
	prefetch(*dtrh);
}
/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from work array
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
	channel->work_arr[channel->compl_index] = NULL;

	/* wrap-around */
	if (++channel->compl_index == channel->length)
		channel->compl_index = 0;

	channel->stats->total_compl_cnt++;
}
/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtrh: DTR pointer
 *
 * Returns the dtr to free array
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
	channel->free_arr[--channel->free_ptr] = dtrh;
}
/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve number of DTRs available. This function can not be called
 * from data path. ring_initial_replenish() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
	return (channel->reserve_ptr - channel->reserve_top) +
		(channel->length - channel->free_ptr);
}
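/*
 * Worked example (illustrative numbers): with reserve_ptr = 8,
 * reserve_top = 5, length = 16 and free_ptr = 12, the count is
 * (8 - 5) + (16 - 12) = 7 dtrs still available to reserve - three
 * waiting in the reserve array plus four already returned to the free
 * array but not yet swapped back.
 */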
/*
 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
	void **rxdh)
{
	enum vxge_hw_status status;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	status = vxge_hw_channel_dtr_alloc(channel, rxdh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_ring_rxd_1 *rxdp =
			(struct vxge_hw_ring_rxd_1 *)*rxdh;

		rxdp->control_0 = rxdp->control_1 = 0;
	}

	return status;
}
/*
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_free(channel, rxdh);
}
/*
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares a rxd and posts
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_post(channel, rxdh);
}
/*
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
/*
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	wmb();
	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(channel, rxdh);

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
/*
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
	wmb();
	vxge_hw_ring_rxd_post_post(ring, rxdh);
}
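/*
 * Ordering note with a sketch (illustrative, not driver code): the wmb()
 * ensures every CPU store that filled the RxD (buffer pointer, size
 * bits) is visible to the device before control_0 hands ownership over.
 * A hypothetical replenish path:
 *
 *	rxdp->buffer0_ptr = dma_addr;	// fill descriptor fields
 *	rxdp->control_1 |= size_bits;	// hypothetical size encoding
 *	vxge_hw_ring_rxd_post_post_wmb(ring, rxdp);	// barrier, then OWN
 */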
/*
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 * Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
 * completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * processing context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_ring_rxd_1 *rxdp;
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 control_0, own;

	channel = &ring->channel;

	vxge_hw_channel_dtr_try_complete(channel, rxdh);

	rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
	if (rxdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	control_0 = rxdp->control_0;
	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

	/* check whether it is not the end */
	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {

		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
				0);

		++ring->cmpl_cnt;
		vxge_hw_channel_dtr_complete(channel);

		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

		ring->stats->common_stats.usage_cnt++;
		if (ring->stats->common_stats.usage_max <
				ring->stats->common_stats.usage_cnt)
			ring->stats->common_stats.usage_max =
				ring->stats->common_stats.usage_cnt;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* reset it. since we don't want to return
	 * garbage to the driver */
	*rxdh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
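/*
 * Usage sketch of the completion loop (illustrative; "deliver_skb" is a
 * hypothetical helper). This is the deferred-processing form mentioned
 * in the comment above:
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) ==
 *		    VXGE_HW_OK)
 *			deliver_skb(rxdh);
 *		vxge_hw_ring_rxd_free(ring, rxdh);
 *	}
 */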
/*
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	/* If the t_code is not supported and if the
	 * t_code is other than 0x5 (unparseable packet
	 * such as unknown IPv6 header), Drop it !!!
	 */

	if (t_code == VXGE_HW_RING_T_CODE_OK ||
		t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
		status = VXGE_HW_OK;
		goto exit;
	}

	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
/*
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to doorbell FIFO
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
		&fifo->nofl_db->control_0);

	mmiowb();

	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

	mmiowb();
}
/*
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
	return vxge_hw_channel_dtr_count(&fifoh->channel);
}
/*
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifoh: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 *        with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by driver)
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of driver to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
	struct __vxge_hw_fifo *fifo,
	void **txdlh, void **txdl_priv)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status;
	int i;

	channel = &fifo->channel;

	status = vxge_hw_channel_dtr_alloc(channel, txdlh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_fifo_txd *txdp =
			(struct vxge_hw_fifo_txd *)*txdlh;
		struct __vxge_hw_fifo_txdl_priv *priv;

		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

		/* reset the TxDL's private */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->next_txdl_priv = NULL;

		*txdl_priv = (void *)(size_t)txdp->host_control;

		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}
	}

	return status;
}
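/*
 * Transmit-path usage sketch (illustrative; DMA mapping and error paths
 * elided). The reserve/fill/post triple is the whole non-offload send
 * flow; "dma_addr" and the NETDEV_TX_BUSY policy are hypothetical:
 *
 *	void *txdlh, *txdl_priv;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) !=
 *	    VXGE_HW_OK)
 *		return NETDEV_TX_BUSY;
 *
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, skb->len);
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);
 */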
/*
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
				  void *txdlh, u32 frag_idx,
				  dma_addr_t dma_pointer, u32 size)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp, *txdp_last;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

	if (frag_idx != 0)
		txdp->control_0 = txdp->control_1 = 0;
	else {
		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
		txdp->control_1 |= fifo->interrupt_type;
		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
			fifo->tx_intr_num);
		if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
			(txdl_priv->frags - 1);
			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
		}
	}

	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
	fifo->stats->total_buffers++;
	txdl_priv->frags++;
}
/*
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 * @frags: Number of contiguous buffers that are part of a single
 *         transmit operation.
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = (struct vxge_hw_fifo_txd *)txdlh;

	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
	      VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
		fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}
/*
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * processing context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = (struct vxge_hw_fifo_txd *)*txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether host owns it */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* no more completions */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
/*
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
					      void *txdlh,
					      enum vxge_hw_fifo_tcode t_code)
{
	struct __vxge_hw_channel *channel;

	enum vxge_hw_status status = VXGE_HW_OK;
	channel = &fifo->channel;

	if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
/*
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	u32 max_frags;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
			(struct vxge_hw_fifo_txd *)txdlh);

	max_frags = fifo->config->max_frags;

	vxge_hw_channel_dtr_free(channel, txdlh);
}
/*
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN],
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	switch (duplicate_mode) {
	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
		i = 0;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
		i = 1;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
		i = 2;
		break;
	default:
		i = 0;
		break;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
	return status;
}
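/*
 * Worked example of the packing loop above (illustrative): for macaddr
 * 00:0c:29:aa:bb:cc the byte-at-a-time shift leaves
 * data1 = 0x00000C29AABBCC, i.e. the address in network byte order in
 * the low 48 bits of the steering DATA0 word; the mask is packed the
 * same way into data2. The get/get_next helpers below unpack with the
 * mirror-image loop (mask, shift right by 8).
 */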
/*
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 * vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Delete the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}
/*
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
/*
 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the first vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}
/*
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
/*
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/*
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}
/*
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *                          alarms.
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *             interrupts (Can be repeated). If fifo or ring are not enabled
 *             the MSIX vector for that should be set to 0
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API will associate a given MSIX vector numbers with the four TIM
 * interrupts and alarm interrupt.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
		  (vp_id * 4) + tim_msix_id[0]) |
		 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
		  (vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
					VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
	}

	if (vpath->hldev->config.intr_mode ==
		VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);

		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
				0, 32), &vp_reg->one_shot_vect3_en);
	}
}
/*
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
/*
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
/*
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
		(tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
		(tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}
/*
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
		(tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
		(tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}
/*
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
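/*
 * Doorbell arithmetic example (illustrative numbers): each RxD occupies
 * four qwords, so replenishing 16 descriptors posts new_count = 64
 * qwords; whenever total_db_cnt crosses a descriptor block boundary,
 * four extra qwords are added to cover the block header before the
 * count is written to prc_rxd_doorbell and flushed with the readl().
 */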
/*
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: Buffer to return the list of completed skbs
 * @nr_skb: Number of skbs in the list
 * @more: Set when there are more completions yet to be processed
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					struct sk_buff ***skb_ptr, int nr_skb,
					int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}