4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
29 * Tavor IOCTL Routines
31 * Implements all ioctl access into the driver. This includes all routines
32 * necessary for updating firmware, accessing the tavor flash device, and
33 * providing interfaces for VTS.
36 #include <sys/types.h>
39 #include <sys/sunddi.h>
40 #include <sys/modctl.h>
43 #include <sys/ib/adapters/tavor/tavor.h>
45 /* Tavor HCA state pointer (extern) */
46 extern void *tavor_statep
;
49 * The ioctl declarations (for firmware flash burning, register read/write
50 * (DEBUG-only), and VTS interfaces)
52 static int tavor_ioctl_flash_read(tavor_state_t
*state
, dev_t dev
,
53 intptr_t arg
, int mode
);
54 static int tavor_ioctl_flash_write(tavor_state_t
*state
, dev_t dev
,
55 intptr_t arg
, int mode
);
56 static int tavor_ioctl_flash_erase(tavor_state_t
*state
, dev_t dev
,
57 intptr_t arg
, int mode
);
58 static int tavor_ioctl_flash_init(tavor_state_t
*state
, dev_t dev
,
59 intptr_t arg
, int mode
);
60 static int tavor_ioctl_flash_fini(tavor_state_t
*state
, dev_t dev
);
61 static void tavor_ioctl_flash_cleanup(tavor_state_t
*state
);
62 static void tavor_ioctl_flash_cleanup_nolock(tavor_state_t
*state
);
64 static int tavor_ioctl_reg_write(tavor_state_t
*state
, intptr_t arg
,
66 static int tavor_ioctl_reg_read(tavor_state_t
*state
, intptr_t arg
,
69 static int tavor_ioctl_info(tavor_state_t
*state
, dev_t dev
,
70 intptr_t arg
, int mode
);
71 static int tavor_ioctl_ports(tavor_state_t
*state
, intptr_t arg
,
73 static int tavor_ioctl_loopback(tavor_state_t
*state
, intptr_t arg
,
75 static int tavor_ioctl_ddr_read(tavor_state_t
*state
, intptr_t arg
,
78 /* Tavor Flash Functions */
79 static void tavor_flash_read_sector(tavor_state_t
*state
, uint32_t sector_num
);
80 static void tavor_flash_read_quadlet(tavor_state_t
*state
, uint32_t *data
,
82 static int tavor_flash_write_sector(tavor_state_t
*state
, uint32_t sector_num
);
83 static int tavor_flash_write_byte(tavor_state_t
*state
, uint32_t addr
,
85 static int tavor_flash_erase_sector(tavor_state_t
*state
, uint32_t sector_num
);
86 static int tavor_flash_erase_chip(tavor_state_t
*state
);
87 static void tavor_flash_bank(tavor_state_t
*state
, uint32_t addr
);
88 static uint32_t tavor_flash_read(tavor_state_t
*state
, uint32_t addr
);
89 static void tavor_flash_write(tavor_state_t
*state
, uint32_t addr
,
91 static void tavor_flash_init(tavor_state_t
*state
);
92 static void tavor_flash_cfi_init(tavor_state_t
*state
, uint32_t *cfi_info
,
94 static void tavor_flash_fini(tavor_state_t
*state
);
95 static void tavor_flash_reset(tavor_state_t
*state
);
96 static uint32_t tavor_flash_read_cfg(ddi_acc_handle_t pci_config_hdl
,
98 static void tavor_flash_write_cfg(ddi_acc_handle_t pci_config_hdl
,
99 uint32_t addr
, uint32_t data
);
100 static void tavor_flash_cfi_byte(uint8_t *ch
, uint32_t dword
, int i
);
101 static void tavor_flash_cfi_dword(uint32_t *dword
, uint8_t *ch
, int i
);
103 /* Tavor loopback test functions */
104 static void tavor_loopback_free_qps(tavor_loopback_state_t
*lstate
);
105 static void tavor_loopback_free_state(tavor_loopback_state_t
*lstate
);
106 static int tavor_loopback_init(tavor_state_t
*state
,
107 tavor_loopback_state_t
*lstate
);
108 static void tavor_loopback_init_qp_info(tavor_loopback_state_t
*lstate
,
109 tavor_loopback_comm_t
*comm
);
110 static int tavor_loopback_alloc_mem(tavor_loopback_state_t
*lstate
,
111 tavor_loopback_comm_t
*comm
, int sz
);
112 static int tavor_loopback_alloc_qps(tavor_loopback_state_t
*lstate
,
113 tavor_loopback_comm_t
*comm
);
114 static int tavor_loopback_modify_qp(tavor_loopback_state_t
*lstate
,
115 tavor_loopback_comm_t
*comm
, uint_t qp_num
);
116 static int tavor_loopback_copyout(tavor_loopback_ioctl_t
*lb
,
117 intptr_t arg
, int mode
);
118 static int tavor_loopback_post_send(tavor_loopback_state_t
*lstate
,
119 tavor_loopback_comm_t
*tx
, tavor_loopback_comm_t
*rx
);
120 static int tavor_loopback_poll_cq(tavor_loopback_state_t
*lstate
,
121 tavor_loopback_comm_t
*comm
);
123 /* Patchable timeout values for flash operations */
124 int tavor_hw_flash_timeout_gpio_sema
= TAVOR_HW_FLASH_TIMEOUT_GPIO_SEMA
;
125 int tavor_hw_flash_timeout_config
= TAVOR_HW_FLASH_TIMEOUT_CONFIG
;
126 int tavor_hw_flash_timeout_write
= TAVOR_HW_FLASH_TIMEOUT_WRITE
;
127 int tavor_hw_flash_timeout_erase
= TAVOR_HW_FLASH_TIMEOUT_ERASE
;
134 tavor_ioctl(dev_t dev
, int cmd
, intptr_t arg
, int mode
, cred_t
*credp
,
137 tavor_state_t
*state
;
141 if (drv_priv(credp
) != 0) {
145 instance
= TAVOR_DEV_INSTANCE(dev
);
146 if (instance
== -1) {
150 state
= ddi_get_soft_state(tavor_statep
, instance
);
158 case TAVOR_IOCTL_FLASH_READ
:
159 status
= tavor_ioctl_flash_read(state
, dev
, arg
, mode
);
162 case TAVOR_IOCTL_FLASH_WRITE
:
163 status
= tavor_ioctl_flash_write(state
, dev
, arg
, mode
);
166 case TAVOR_IOCTL_FLASH_ERASE
:
167 status
= tavor_ioctl_flash_erase(state
, dev
, arg
, mode
);
170 case TAVOR_IOCTL_FLASH_INIT
:
171 status
= tavor_ioctl_flash_init(state
, dev
, arg
, mode
);
174 case TAVOR_IOCTL_FLASH_FINI
:
175 status
= tavor_ioctl_flash_fini(state
, dev
);
178 case TAVOR_IOCTL_INFO
:
179 status
= tavor_ioctl_info(state
, dev
, arg
, mode
);
182 case TAVOR_IOCTL_PORTS
:
183 status
= tavor_ioctl_ports(state
, arg
, mode
);
186 case TAVOR_IOCTL_DDR_READ
:
187 status
= tavor_ioctl_ddr_read(state
, arg
, mode
);
190 case TAVOR_IOCTL_LOOPBACK
:
191 status
= tavor_ioctl_loopback(state
, arg
, mode
);
195 case TAVOR_IOCTL_REG_WRITE
:
196 status
= tavor_ioctl_reg_write(state
, arg
, mode
);
199 case TAVOR_IOCTL_REG_READ
:
200 status
= tavor_ioctl_reg_read(state
, arg
, mode
);
214 * tavor_ioctl_flash_read()
217 tavor_ioctl_flash_read(tavor_state_t
*state
, dev_t dev
, intptr_t arg
, int mode
)
219 tavor_flash_ioctl_t ioctl_info
;
223 * Check that flash init ioctl has been called first. And check
224 * that the same dev_t that called init is the one calling read now.
226 mutex_enter(&state
->ts_fw_flashlock
);
227 if ((state
->ts_fw_flashdev
!= dev
) ||
228 (state
->ts_fw_flashstarted
== 0)) {
229 mutex_exit(&state
->ts_fw_flashlock
);
233 /* copy user struct to kernel */
234 #ifdef _MULTI_DATAMODEL
235 if (ddi_model_convert_from(mode
& FMODELS
) == DDI_MODEL_ILP32
) {
236 tavor_flash_ioctl32_t info32
;
238 if (ddi_copyin((void *)arg
, &info32
,
239 sizeof (tavor_flash_ioctl32_t
), mode
) != 0) {
240 mutex_exit(&state
->ts_fw_flashlock
);
243 ioctl_info
.tf_type
= info32
.tf_type
;
244 ioctl_info
.tf_sector
= (caddr_t
)(uintptr_t)info32
.tf_sector
;
245 ioctl_info
.tf_sector_num
= info32
.tf_sector_num
;
246 ioctl_info
.tf_addr
= info32
.tf_addr
;
248 #endif /* _MULTI_DATAMODEL */
249 if (ddi_copyin((void *)arg
, &ioctl_info
, sizeof (tavor_flash_ioctl_t
),
251 mutex_exit(&state
->ts_fw_flashlock
);
256 * Determine type of READ ioctl
258 switch (ioctl_info
.tf_type
) {
259 case TAVOR_FLASH_READ_SECTOR
:
260 /* Check if sector num is too large for flash device */
261 if (ioctl_info
.tf_sector_num
>=
262 (state
->ts_fw_device_sz
>> state
->ts_fw_log_sector_sz
)) {
263 mutex_exit(&state
->ts_fw_flashlock
);
267 /* Perform the Sector Read */
268 tavor_flash_reset(state
);
269 tavor_flash_read_sector(state
, ioctl_info
.tf_sector_num
);
271 /* copyout the firmware sector image data */
272 if (ddi_copyout(&state
->ts_fw_sector
[0],
273 &ioctl_info
.tf_sector
[0], 1 << state
->ts_fw_log_sector_sz
,
275 mutex_exit(&state
->ts_fw_flashlock
);
280 case TAVOR_FLASH_READ_QUADLET
:
281 /* Check if addr is too large for flash device */
282 if (ioctl_info
.tf_addr
>= state
->ts_fw_device_sz
) {
283 mutex_exit(&state
->ts_fw_flashlock
);
287 /* Perform the Quadlet Read */
288 tavor_flash_reset(state
);
289 tavor_flash_read_quadlet(state
, &ioctl_info
.tf_quadlet
,
298 /* copy results back to userland */
299 #ifdef _MULTI_DATAMODEL
300 if (ddi_model_convert_from(mode
& FMODELS
) == DDI_MODEL_ILP32
) {
301 tavor_flash_ioctl32_t info32
;
303 info32
.tf_quadlet
= ioctl_info
.tf_quadlet
;
304 info32
.tf_type
= ioctl_info
.tf_type
;
305 info32
.tf_sector_num
= ioctl_info
.tf_sector_num
;
306 info32
.tf_sector
= (caddr32_t
)(uintptr_t)ioctl_info
.tf_sector
;
307 info32
.tf_addr
= ioctl_info
.tf_addr
;
309 if (ddi_copyout(&info32
, (void *)arg
,
310 sizeof (tavor_flash_ioctl32_t
), mode
) != 0) {
311 mutex_exit(&state
->ts_fw_flashlock
);
315 #endif /* _MULTI_DATAMODEL */
316 if (ddi_copyout(&ioctl_info
, (void *)arg
,
317 sizeof (tavor_flash_ioctl_t
), mode
) != 0) {
318 mutex_exit(&state
->ts_fw_flashlock
);
322 mutex_exit(&state
->ts_fw_flashlock
);
327 * tavor_ioctl_flash_write()
330 tavor_ioctl_flash_write(tavor_state_t
*state
, dev_t dev
, intptr_t arg
, int mode
)
332 tavor_flash_ioctl_t ioctl_info
;
336 * Check that flash init ioctl has been called first. And check
337 * that the same dev_t that called init is the one calling write now.
339 mutex_enter(&state
->ts_fw_flashlock
);
340 if ((state
->ts_fw_flashdev
!= dev
) ||
341 (state
->ts_fw_flashstarted
== 0)) {
342 mutex_exit(&state
->ts_fw_flashlock
);
346 /* copy user struct to kernel */
347 #ifdef _MULTI_DATAMODEL
348 if (ddi_model_convert_from(mode
& FMODELS
) == DDI_MODEL_ILP32
) {
349 tavor_flash_ioctl32_t info32
;
351 if (ddi_copyin((void *)arg
, &info32
,
352 sizeof (tavor_flash_ioctl32_t
), mode
) != 0) {
353 mutex_exit(&state
->ts_fw_flashlock
);
356 ioctl_info
.tf_type
= info32
.tf_type
;
357 ioctl_info
.tf_sector
= (caddr_t
)(uintptr_t)info32
.tf_sector
;
358 ioctl_info
.tf_sector_num
= info32
.tf_sector_num
;
359 ioctl_info
.tf_addr
= info32
.tf_addr
;
360 ioctl_info
.tf_byte
= info32
.tf_byte
;
362 #endif /* _MULTI_DATAMODEL */
363 if (ddi_copyin((void *)arg
, &ioctl_info
,
364 sizeof (tavor_flash_ioctl_t
), mode
) != 0) {
365 mutex_exit(&state
->ts_fw_flashlock
);
370 * Determine type of WRITE ioctl
372 switch (ioctl_info
.tf_type
) {
373 case TAVOR_FLASH_WRITE_SECTOR
:
374 /* Check if sector num is too large for flash device */
375 if (ioctl_info
.tf_sector_num
>=
376 (state
->ts_fw_device_sz
>> state
->ts_fw_log_sector_sz
)) {
377 mutex_exit(&state
->ts_fw_flashlock
);
381 /* copy in fw sector image data */
382 if (ddi_copyin(&ioctl_info
.tf_sector
[0],
383 &state
->ts_fw_sector
[0], 1 << state
->ts_fw_log_sector_sz
,
385 mutex_exit(&state
->ts_fw_flashlock
);
389 /* Perform Write Sector */
390 status
= tavor_flash_write_sector(state
,
391 ioctl_info
.tf_sector_num
);
394 case TAVOR_FLASH_WRITE_BYTE
:
395 /* Check if addr is too large for flash device */
396 if (ioctl_info
.tf_addr
>= state
->ts_fw_device_sz
) {
397 mutex_exit(&state
->ts_fw_flashlock
);
401 /* Perform Write Byte */
402 tavor_flash_bank(state
, ioctl_info
.tf_addr
);
403 tavor_flash_reset(state
);
404 status
= tavor_flash_write_byte(state
, ioctl_info
.tf_addr
,
406 tavor_flash_reset(state
);
414 mutex_exit(&state
->ts_fw_flashlock
);
419 * tavor_ioctl_flash_erase()
422 tavor_ioctl_flash_erase(tavor_state_t
*state
, dev_t dev
, intptr_t arg
, int mode
)
424 tavor_flash_ioctl_t ioctl_info
;
428 * Check that flash init ioctl has been called first. And check
429 * that the same dev_t that called init is the one calling erase now.
431 mutex_enter(&state
->ts_fw_flashlock
);
432 if ((state
->ts_fw_flashdev
!= dev
) ||
433 (state
->ts_fw_flashstarted
== 0)) {
434 mutex_exit(&state
->ts_fw_flashlock
);
438 /* copy user struct to kernel */
439 #ifdef _MULTI_DATAMODEL
440 if (ddi_model_convert_from(mode
& FMODELS
) == DDI_MODEL_ILP32
) {
441 tavor_flash_ioctl32_t info32
;
443 if (ddi_copyin((void *)arg
, &info32
,
444 sizeof (tavor_flash_ioctl32_t
), mode
) != 0) {
445 mutex_exit(&state
->ts_fw_flashlock
);
448 ioctl_info
.tf_type
= info32
.tf_type
;
449 ioctl_info
.tf_sector_num
= info32
.tf_sector_num
;
451 #endif /* _MULTI_DATAMODEL */
452 if (ddi_copyin((void *)arg
, &ioctl_info
, sizeof (tavor_flash_ioctl_t
),
454 mutex_exit(&state
->ts_fw_flashlock
);
459 * Determine type of ERASE ioctl
461 switch (ioctl_info
.tf_type
) {
462 case TAVOR_FLASH_ERASE_SECTOR
:
463 /* Check if sector num is too large for flash device */
464 if (ioctl_info
.tf_sector_num
>=
465 (state
->ts_fw_device_sz
>> state
->ts_fw_log_sector_sz
)) {
466 mutex_exit(&state
->ts_fw_flashlock
);
470 /* Perform Sector Erase */
471 status
= tavor_flash_erase_sector(state
,
472 ioctl_info
.tf_sector_num
);
475 case TAVOR_FLASH_ERASE_CHIP
:
476 /* Perform Chip Erase */
477 status
= tavor_flash_erase_chip(state
);
485 mutex_exit(&state
->ts_fw_flashlock
);
490 * tavor_ioctl_flash_init()
493 tavor_ioctl_flash_init(tavor_state_t
*state
, dev_t dev
, intptr_t arg
, int mode
)
495 tavor_flash_init_ioctl_t init_info
;
500 * init cannot be called more than once. If we have already init'd the
501 * flash, return directly.
503 mutex_enter(&state
->ts_fw_flashlock
);
504 if (state
->ts_fw_flashstarted
== 1) {
505 mutex_exit(&state
->ts_fw_flashlock
);
509 /* copyin the user struct to kernel */
510 if (ddi_copyin((void *)arg
, &init_info
,
511 sizeof (tavor_flash_init_ioctl_t
), mode
) != 0) {
512 mutex_exit(&state
->ts_fw_flashlock
);
517 tavor_flash_init(state
);
520 tavor_flash_cfi_init(state
, &init_info
.tf_cfi_info
[0], &intel_xcmd
);
523 * Return error if the command set is unknown.
525 if (state
->ts_fw_cmdset
== TAVOR_FLASH_UNKNOWN_CMDSET
) {
526 mutex_exit(&state
->ts_fw_flashlock
);
530 /* Read HWREV - least significant 8 bits is revision ID */
531 init_info
.tf_hwrev
= pci_config_get32(state
->ts_pci_cfghdl
,
532 TAVOR_HW_FLASH_CFG_HWREV
) & 0xFF;
534 /* Fill in the firmwate revision numbers */
535 init_info
.tf_fwrev
.tfi_maj
= state
->ts_fw
.fw_rev_major
;
536 init_info
.tf_fwrev
.tfi_min
= state
->ts_fw
.fw_rev_minor
;
537 init_info
.tf_fwrev
.tfi_sub
= state
->ts_fw
.fw_rev_subminor
;
539 /* Alloc flash mem for one sector size */
540 state
->ts_fw_sector
= kmem_zalloc(1 <<
541 state
->ts_fw_log_sector_sz
, KM_SLEEP
);
543 /* Set HW part number and length */
544 init_info
.tf_pn_len
= state
->ts_hca_pn_len
;
545 if (state
->ts_hca_pn_len
!= 0) {
546 (void) memcpy(init_info
.tf_hwpn
, state
->ts_hca_pn
,
547 state
->ts_hca_pn_len
);
550 /* Copy ioctl results back to userland */
551 if (ddi_copyout(&init_info
, (void *)arg
,
552 sizeof (tavor_flash_init_ioctl_t
), mode
) != 0) {
554 tavor_ioctl_flash_cleanup_nolock(state
);
556 mutex_exit(&state
->ts_fw_flashlock
);
560 /* Set flash state to started */
561 state
->ts_fw_flashstarted
= 1;
562 state
->ts_fw_flashdev
= dev
;
564 mutex_exit(&state
->ts_fw_flashlock
);
567 * If "flash init" is successful, add an "on close" callback to the
568 * current dev node to ensure that "flash fini" gets called later
569 * even if the userland process prematurely exits.
571 ret
= tavor_umap_db_set_onclose_cb(dev
,
572 TAVOR_ONCLOSE_FLASH_INPROGRESS
,
573 (void (*)(void *))tavor_ioctl_flash_cleanup
, state
);
574 if (ret
!= DDI_SUCCESS
) {
575 (void) tavor_ioctl_flash_fini(state
, dev
);
584 * tavor_ioctl_flash_fini()
587 tavor_ioctl_flash_fini(tavor_state_t
*state
, dev_t dev
)
592 * Check that flash init ioctl has been called first. And check
593 * that the same dev_t that called init is the one calling fini now.
595 mutex_enter(&state
->ts_fw_flashlock
);
596 if ((state
->ts_fw_flashdev
!= dev
) ||
597 (state
->ts_fw_flashstarted
== 0)) {
598 mutex_exit(&state
->ts_fw_flashlock
);
602 tavor_ioctl_flash_cleanup_nolock(state
);
604 mutex_exit(&state
->ts_fw_flashlock
);
607 * If "flash fini" is successful, remove the "on close" callback
608 * that was setup during "flash init".
610 ret
= tavor_umap_db_clear_onclose_cb(dev
,
611 TAVOR_ONCLOSE_FLASH_INPROGRESS
);
612 if (ret
!= DDI_SUCCESS
) {
621 * tavor_ioctl_flash_cleanup()
624 tavor_ioctl_flash_cleanup(tavor_state_t
*state
)
626 mutex_enter(&state
->ts_fw_flashlock
);
627 tavor_ioctl_flash_cleanup_nolock(state
);
628 mutex_exit(&state
->ts_fw_flashlock
);
633 * tavor_ioctl_flash_cleanup_nolock()
636 tavor_ioctl_flash_cleanup_nolock(tavor_state_t
*state
)
638 ASSERT(MUTEX_HELD(&state
->ts_fw_flashlock
));
641 kmem_free(state
->ts_fw_sector
, 1 << state
->ts_fw_log_sector_sz
);
644 tavor_flash_fini(state
);
646 /* Set flash state to fini */
647 state
->ts_fw_flashstarted
= 0;
648 state
->ts_fw_flashdev
= 0;
656 tavor_ioctl_info(tavor_state_t
*state
, dev_t dev
, intptr_t arg
, int mode
)
658 tavor_info_ioctl_t info
;
659 tavor_flash_init_ioctl_t init_info
;
662 * Access to Tavor VTS ioctls is not allowed in "maintenance mode".
664 if (state
->ts_operational_mode
== TAVOR_MAINTENANCE_MODE
) {
668 /* copyin the user struct to kernel */
669 if (ddi_copyin((void *)arg
, &info
, sizeof (tavor_info_ioctl_t
),
675 * Check ioctl revision
677 if (info
.ti_revision
!= TAVOR_VTS_IOCTL_REVISION
) {
682 * If the 'fw_device_sz' has not been initialized yet, we initialize it
683 * here. This is done by leveraging the
684 * tavor_ioctl_flash_init()/fini() calls. We also hold our own mutex
685 * around this operation in case we have multiple VTS threads in
686 * process at the same time.
688 mutex_enter(&state
->ts_info_lock
);
689 if (state
->ts_fw_device_sz
== 0) {
690 if (tavor_ioctl_flash_init(state
, dev
, (intptr_t)&init_info
,
691 (FKIOCTL
| mode
)) != 0) {
692 mutex_exit(&state
->ts_info_lock
);
695 (void) tavor_ioctl_flash_fini(state
, dev
);
697 mutex_exit(&state
->ts_info_lock
);
699 info
.ti_hw_rev
= state
->ts_adapter
.rev_id
;
700 info
.ti_flash_sz
= state
->ts_fw_device_sz
;
701 info
.ti_fw_rev
.tfi_maj
= state
->ts_fw
.fw_rev_major
;
702 info
.ti_fw_rev
.tfi_min
= state
->ts_fw
.fw_rev_minor
;
703 info
.ti_fw_rev
.tfi_sub
= state
->ts_fw
.fw_rev_subminor
;
704 info
.ti_mem_start_offset
= 0;
705 info
.ti_mem_end_offset
= state
->ts_ddr
.ddr_endaddr
-
706 state
->ts_ddr
.ddr_baseaddr
;
708 /* Copy ioctl results back to user struct */
709 if (ddi_copyout(&info
, (void *)arg
, sizeof (tavor_info_ioctl_t
),
718 * tavor_ioctl_ports()
721 tavor_ioctl_ports(tavor_state_t
*state
, intptr_t arg
, int mode
)
723 tavor_ports_ioctl_t info
;
724 tavor_stat_port_ioctl_t portstat
;
725 ibt_hca_portinfo_t pi
;
732 * Access to Tavor VTS ioctls is not allowed in "maintenance mode".
734 if (state
->ts_operational_mode
== TAVOR_MAINTENANCE_MODE
) {
738 /* copyin the user struct to kernel */
739 #ifdef _MULTI_DATAMODEL
740 if (ddi_model_convert_from(mode
& FMODELS
) == DDI_MODEL_ILP32
) {
741 tavor_ports_ioctl32_t info32
;
743 if (ddi_copyin((void *)arg
, &info32
,
744 sizeof (tavor_ports_ioctl32_t
), mode
) != 0) {
747 info
.tp_revision
= info32
.tp_revision
;
749 (tavor_stat_port_ioctl_t
*)(uintptr_t)info32
.tp_ports
;
750 info
.tp_num_ports
= info32
.tp_num_ports
;
753 #endif /* _MULTI_DATAMODEL */
754 if (ddi_copyin((void *)arg
, &info
, sizeof (tavor_ports_ioctl_t
),
760 * Check ioctl revision
762 if (info
.tp_revision
!= TAVOR_VTS_IOCTL_REVISION
) {
766 /* Allocate space for temporary GID table/PKey table */
767 tbl_size
= (1 << state
->ts_cfg_profile
->cp_log_max_gidtbl
);
768 sgid_tbl
= (ib_gid_t
*)kmem_zalloc(tbl_size
* sizeof (ib_gid_t
),
770 tbl_size
= (1 << state
->ts_cfg_profile
->cp_log_max_pkeytbl
);
771 pkey_tbl
= (ib_pkey_t
*)kmem_zalloc(tbl_size
* sizeof (ib_pkey_t
),
774 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgid_tbl
, *pkey_tbl
))
777 * Setup the number of ports, then loop through all ports and
778 * query properties of each.
780 info
.tp_num_ports
= (uint8_t)state
->ts_cfg_profile
->cp_num_ports
;
781 for (i
= 0; i
< info
.tp_num_ports
; i
++) {
783 * Get portstate information from the device. If
784 * tavor_port_query() fails, leave zeroes in user
785 * struct port entry and continue.
787 bzero(&pi
, sizeof (ibt_hca_portinfo_t
));
788 pi
.p_sgid_tbl
= sgid_tbl
;
789 pi
.p_pkey_tbl
= pkey_tbl
;
790 if (tavor_port_query(state
, i
+ 1, &pi
) != 0) {
793 portstat
.tsp_port_num
= pi
.p_port_num
;
794 portstat
.tsp_state
= pi
.p_linkstate
;
795 portstat
.tsp_guid
= pi
.p_sgid_tbl
[0].gid_guid
;
798 * Copy queried port results back to user struct. If
799 * this fails, then break out of loop, attempt to copy
800 * out remaining info to user struct, and return (without
803 if (ddi_copyout(&portstat
,
804 &(((tavor_stat_port_ioctl_t
*)info
.tp_ports
)[i
]),
805 sizeof (tavor_stat_port_ioctl_t
), mode
) != 0) {
810 /* Free the temporary space used for GID table/PKey table */
811 tbl_size
= (1 << state
->ts_cfg_profile
->cp_log_max_gidtbl
);
812 kmem_free(sgid_tbl
, tbl_size
* sizeof (ib_gid_t
));
813 tbl_size
= (1 << state
->ts_cfg_profile
->cp_log_max_pkeytbl
);
814 kmem_free(pkey_tbl
, tbl_size
* sizeof (ib_pkey_t
));
816 /* Copy ioctl results back to user struct */
817 #ifdef _MULTI_DATAMODEL
818 if (ddi_model_convert_from(mode
& FMODELS
) == DDI_MODEL_ILP32
) {
819 tavor_ports_ioctl32_t info32
;
821 info32
.tp_revision
= info
.tp_revision
;
822 info32
.tp_ports
= (caddr32_t
)(uintptr_t)info
.tp_ports
;
823 info32
.tp_num_ports
= info
.tp_num_ports
;
825 if (ddi_copyout(&info32
, (void *)arg
,
826 sizeof (tavor_ports_ioctl32_t
), mode
) != 0) {
830 #endif /* _MULTI_DATAMODEL */
831 if (ddi_copyout(&info
, (void *)arg
, sizeof (tavor_ports_ioctl_t
),
840 * tavor_ioctl_loopback()
843 tavor_ioctl_loopback(tavor_state_t
*state
, intptr_t arg
, int mode
)
845 tavor_loopback_ioctl_t lb
;
846 tavor_loopback_state_t lstate
;
847 ibt_hca_portinfo_t pi
;
848 uint_t tbl_size
, loopmax
, max_usec
;
853 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(lstate
))
856 * Access to Tavor VTS ioctls is not allowed in "maintenance mode".
858 if (state
->ts_operational_mode
== TAVOR_MAINTENANCE_MODE
) {
862 /* copyin the user struct to kernel */
863 #ifdef _MULTI_DATAMODEL
864 if (ddi_model_convert_from(mode
& FMODELS
) == DDI_MODEL_ILP32
) {
865 tavor_loopback_ioctl32_t lb32
;
867 if (ddi_copyin((void *)arg
, &lb32
,
868 sizeof (tavor_loopback_ioctl32_t
), mode
) != 0) {
871 lb
.tlb_revision
= lb32
.tlb_revision
;
872 lb
.tlb_send_buf
= (caddr_t
)(uintptr_t)lb32
.tlb_send_buf
;
873 lb
.tlb_fail_buf
= (caddr_t
)(uintptr_t)lb32
.tlb_fail_buf
;
874 lb
.tlb_buf_sz
= lb32
.tlb_buf_sz
;
875 lb
.tlb_num_iter
= lb32
.tlb_num_iter
;
876 lb
.tlb_pass_done
= lb32
.tlb_pass_done
;
877 lb
.tlb_timeout
= lb32
.tlb_timeout
;
878 lb
.tlb_error_type
= lb32
.tlb_error_type
;
879 lb
.tlb_port_num
= lb32
.tlb_port_num
;
880 lb
.tlb_num_retry
= lb32
.tlb_num_retry
;
882 #endif /* _MULTI_DATAMODEL */
883 if (ddi_copyin((void *)arg
, &lb
, sizeof (tavor_loopback_ioctl_t
),
888 /* Initialize the internal loopback test state structure */
889 bzero(&lstate
, sizeof (tavor_loopback_state_t
));
892 * Check ioctl revision
894 if (lb
.tlb_revision
!= TAVOR_VTS_IOCTL_REVISION
) {
895 lb
.tlb_error_type
= TAVOR_LOOPBACK_INVALID_REVISION
;
896 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
900 /* Validate that specified port number is legal */
901 if (!tavor_portnum_is_valid(state
, lb
.tlb_port_num
)) {
902 lb
.tlb_error_type
= TAVOR_LOOPBACK_INVALID_PORT
;
903 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
907 /* Allocate space for temporary GID table/PKey table */
908 tbl_size
= (1 << state
->ts_cfg_profile
->cp_log_max_gidtbl
);
909 sgid_tbl
= (ib_gid_t
*)kmem_zalloc(tbl_size
* sizeof (ib_gid_t
),
911 tbl_size
= (1 << state
->ts_cfg_profile
->cp_log_max_pkeytbl
);
912 pkey_tbl
= (ib_pkey_t
*)kmem_zalloc(tbl_size
* sizeof (ib_pkey_t
),
916 * Get portstate information from specific port on device
918 bzero(&pi
, sizeof (ibt_hca_portinfo_t
));
919 pi
.p_sgid_tbl
= sgid_tbl
;
920 pi
.p_pkey_tbl
= pkey_tbl
;
921 if (tavor_port_query(state
, lb
.tlb_port_num
, &pi
) != 0) {
922 /* Free the temporary space used for GID table/PKey table */
923 tbl_size
= (1 << state
->ts_cfg_profile
->cp_log_max_gidtbl
);
924 kmem_free(sgid_tbl
, tbl_size
* sizeof (ib_gid_t
));
925 tbl_size
= (1 << state
->ts_cfg_profile
->cp_log_max_pkeytbl
);
926 kmem_free(pkey_tbl
, tbl_size
* sizeof (ib_pkey_t
));
928 lb
.tlb_error_type
= TAVOR_LOOPBACK_INVALID_PORT
;
929 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
930 tavor_loopback_free_state(&lstate
);
934 lstate
.tls_port
= pi
.p_port_num
;
935 lstate
.tls_lid
= pi
.p_base_lid
;
936 lstate
.tls_pkey_ix
= (pi
.p_linkstate
== TAVOR_PORT_LINK_ACTIVE
) ? 1 : 0;
937 lstate
.tls_state
= state
;
938 lstate
.tls_retry
= lb
.tlb_num_retry
;
940 /* Free the temporary space used for GID table/PKey table */
941 tbl_size
= (1 << state
->ts_cfg_profile
->cp_log_max_gidtbl
);
942 kmem_free(sgid_tbl
, tbl_size
* sizeof (ib_gid_t
));
943 tbl_size
= (1 << state
->ts_cfg_profile
->cp_log_max_pkeytbl
);
944 kmem_free(pkey_tbl
, tbl_size
* sizeof (ib_pkey_t
));
947 * Compute the timeout duration in usec per the formula:
948 * to_usec_per_retry = 4.096us * (2 ^ supplied_timeout)
949 * (plus we add a little fudge-factor here too)
951 lstate
.tls_timeout
= lb
.tlb_timeout
;
952 max_usec
= (4096 * (1 << lstate
.tls_timeout
)) / 1000;
953 max_usec
= max_usec
* (lstate
.tls_retry
+ 1);
954 max_usec
= max_usec
+ 10000;
957 * Determine how many times we should loop before declaring a
960 loopmax
= max_usec
/TAVOR_VTS_LOOPBACK_MIN_WAIT_DUR
;
961 if ((max_usec
% TAVOR_VTS_LOOPBACK_MIN_WAIT_DUR
) != 0) {
965 if (lb
.tlb_send_buf
== NULL
|| lb
.tlb_buf_sz
== 0) {
966 lb
.tlb_error_type
= TAVOR_LOOPBACK_SEND_BUF_INVALID
;
967 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
968 tavor_loopback_free_state(&lstate
);
972 /* Allocate protection domain (PD) */
973 if (tavor_loopback_init(state
, &lstate
) != 0) {
974 lb
.tlb_error_type
= lstate
.tls_err
;
975 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
976 tavor_loopback_free_state(&lstate
);
980 /* Allocate and register a TX buffer */
981 if (tavor_loopback_alloc_mem(&lstate
, &lstate
.tls_tx
,
982 lb
.tlb_buf_sz
) != 0) {
984 TAVOR_LOOPBACK_SEND_BUF_MEM_REGION_ALLOC_FAIL
;
985 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
986 tavor_loopback_free_state(&lstate
);
990 /* Allocate and register an RX buffer */
991 if (tavor_loopback_alloc_mem(&lstate
, &lstate
.tls_rx
,
992 lb
.tlb_buf_sz
) != 0) {
994 TAVOR_LOOPBACK_RECV_BUF_MEM_REGION_ALLOC_FAIL
;
995 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
996 tavor_loopback_free_state(&lstate
);
1000 /* Copy in the transmit buffer data */
1001 if (ddi_copyin((void *)lb
.tlb_send_buf
, lstate
.tls_tx
.tlc_buf
,
1002 lb
.tlb_buf_sz
, mode
) != 0) {
1003 lb
.tlb_error_type
= TAVOR_LOOPBACK_SEND_BUF_COPY_FAIL
;
1004 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
1005 tavor_loopback_free_state(&lstate
);
1009 /* Allocate the transmit QP and CQs */
1010 lstate
.tls_err
= TAVOR_LOOPBACK_XMIT_SEND_CQ_ALLOC_FAIL
;
1011 if (tavor_loopback_alloc_qps(&lstate
, &lstate
.tls_tx
) != 0) {
1012 lb
.tlb_error_type
= lstate
.tls_err
;
1013 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
1014 tavor_loopback_free_state(&lstate
);
1018 /* Allocate the receive QP and CQs */
1019 lstate
.tls_err
= TAVOR_LOOPBACK_RECV_SEND_CQ_ALLOC_FAIL
;
1020 if (tavor_loopback_alloc_qps(&lstate
, &lstate
.tls_rx
) != 0) {
1021 lb
.tlb_error_type
= lstate
.tls_err
;
1022 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
1023 tavor_loopback_free_state(&lstate
);
1027 /* Activate the TX QP (connect to RX QP) */
1028 lstate
.tls_err
= TAVOR_LOOPBACK_XMIT_QP_INIT_FAIL
;
1029 if (tavor_loopback_modify_qp(&lstate
, &lstate
.tls_tx
,
1030 lstate
.tls_rx
.tlc_qp_num
) != 0) {
1031 lb
.tlb_error_type
= lstate
.tls_err
;
1032 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
1033 tavor_loopback_free_state(&lstate
);
1037 /* Activate the RX QP (connect to TX QP) */
1038 lstate
.tls_err
= TAVOR_LOOPBACK_RECV_QP_INIT_FAIL
;
1039 if (tavor_loopback_modify_qp(&lstate
, &lstate
.tls_rx
,
1040 lstate
.tls_tx
.tlc_qp_num
) != 0) {
1041 lb
.tlb_error_type
= lstate
.tls_err
;
1042 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
1043 tavor_loopback_free_state(&lstate
);
1047 /* Run the loopback test (for specified number of iterations) */
1048 lb
.tlb_pass_done
= 0;
1049 for (iter
= 0; iter
< lb
.tlb_num_iter
; iter
++) {
1051 bzero(lstate
.tls_rx
.tlc_buf
, lb
.tlb_buf_sz
);
1053 /* Post RDMA Write work request */
1054 if (tavor_loopback_post_send(&lstate
, &lstate
.tls_tx
,
1055 &lstate
.tls_rx
) != IBT_SUCCESS
) {
1056 lb
.tlb_error_type
= TAVOR_LOOPBACK_WQE_POST_FAIL
;
1057 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
1058 tavor_loopback_free_state(&lstate
);
1062 /* Poll the TX CQ for a completion every few ticks */
1063 for (j
= 0; j
< loopmax
; j
++) {
1064 delay(drv_usectohz(TAVOR_VTS_LOOPBACK_MIN_WAIT_DUR
));
1066 ret
= tavor_loopback_poll_cq(&lstate
, &lstate
.tls_tx
);
1067 if (((ret
!= IBT_SUCCESS
) && (ret
!= IBT_CQ_EMPTY
)) ||
1068 ((ret
== IBT_CQ_EMPTY
) && (j
== loopmax
- 1))) {
1069 lb
.tlb_error_type
= TAVOR_LOOPBACK_CQ_POLL_FAIL
;
1070 if (ddi_copyout(lstate
.tls_rx
.tlc_buf
,
1071 lb
.tlb_fail_buf
, lstate
.tls_tx
.tlc_buf_sz
,
1075 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
1076 tavor_loopback_free_state(&lstate
);
1078 } else if (ret
== IBT_CQ_EMPTY
) {
1082 /* Compare the data buffers */
1083 if (bcmp(lstate
.tls_tx
.tlc_buf
, lstate
.tls_rx
.tlc_buf
,
1084 lb
.tlb_buf_sz
) == 0) {
1088 TAVOR_LOOPBACK_SEND_RECV_COMPARE_FAIL
;
1089 if (ddi_copyout(lstate
.tls_rx
.tlc_buf
,
1090 lb
.tlb_fail_buf
, lstate
.tls_tx
.tlc_buf_sz
,
1094 (void) tavor_loopback_copyout(&lb
, arg
, mode
);
1095 tavor_loopback_free_state(&lstate
);
1100 lstate
.tls_err
= TAVOR_LOOPBACK_SUCCESS
;
1101 lb
.tlb_pass_done
= iter
+ 1;
1104 lb
.tlb_error_type
= TAVOR_LOOPBACK_SUCCESS
;
1106 /* Copy ioctl results back to user struct */
1107 ret
= tavor_loopback_copyout(&lb
, arg
, mode
);
1109 /* Free up everything and release all consumed resources */
1110 tavor_loopback_free_state(&lstate
);
1116 * tavor_ioctl_ddr_read()
1119 tavor_ioctl_ddr_read(tavor_state_t
*state
, intptr_t arg
, int mode
)
1121 tavor_ddr_read_ioctl_t rdreg
;
1127 * Access to Tavor VTS ioctls is not allowed in "maintenance mode".
1129 if (state
->ts_operational_mode
== TAVOR_MAINTENANCE_MODE
) {
1133 /* copyin the user struct to kernel */
1134 if (ddi_copyin((void *)arg
, &rdreg
, sizeof (tavor_ddr_read_ioctl_t
),
1140 * Check ioctl revision
1142 if (rdreg
.tdr_revision
!= TAVOR_VTS_IOCTL_REVISION
) {
1147 * Check for valid offset
1149 ddr_size
= (state
->ts_ddr
.ddr_endaddr
- state
->ts_ddr
.ddr_baseaddr
+ 1);
1150 if ((uint64_t)rdreg
.tdr_offset
>= ddr_size
) {
1154 /* Determine base address for requested register read */
1155 baseaddr
= (uintptr_t)state
->ts_reg_ddr_baseaddr
;
1157 /* Ensure that address is properly-aligned */
1158 addr
= (uint32_t *)((baseaddr
+ rdreg
.tdr_offset
) & ~0x3);
1160 /* Read the register pointed to by addr */
1161 rdreg
.tdr_data
= ddi_get32(state
->ts_reg_cmdhdl
, addr
);
1163 /* Copy ioctl results back to user struct */
1164 if (ddi_copyout(&rdreg
, (void *)arg
, sizeof (tavor_ddr_read_ioctl_t
),
1175 * tavor_ioctl_reg_read()
1178 tavor_ioctl_reg_read(tavor_state_t
*state
, intptr_t arg
, int mode
)
1180 tavor_reg_ioctl_t rdreg
;
1186 * Access to Tavor registers is not allowed in "maintenance mode".
1187 * This is primarily because the device may not have BARs to access
1189 if (state
->ts_operational_mode
== TAVOR_MAINTENANCE_MODE
) {
1193 /* Copy in the tavor_reg_ioctl_t structure */
1194 status
= ddi_copyin((void *)arg
, &rdreg
, sizeof (tavor_reg_ioctl_t
),
1200 /* Determine base address for requested register set */
1201 switch (rdreg
.trg_reg_set
) {
1203 baseaddr
= (uintptr_t)state
->ts_reg_cmd_baseaddr
;
1207 baseaddr
= (uintptr_t)state
->ts_reg_uar_baseaddr
;
1211 baseaddr
= (uintptr_t)state
->ts_reg_ddr_baseaddr
;
1218 /* Ensure that address is properly-aligned */
1219 addr
= (uint32_t *)((baseaddr
+ rdreg
.trg_offset
) & ~0x3);
1221 /* Read the register pointed to by addr */
1222 rdreg
.trg_data
= ddi_get32(state
->ts_reg_cmdhdl
, addr
);
1224 /* Copy in the result into the tavor_reg_ioctl_t structure */
1225 status
= ddi_copyout(&rdreg
, (void *)arg
, sizeof (tavor_reg_ioctl_t
),
1236 * tavor_ioctl_reg_write()
1239 tavor_ioctl_reg_write(tavor_state_t
*state
, intptr_t arg
, int mode
)
1241 tavor_reg_ioctl_t wrreg
;
1247 * Access to Tavor registers is not allowed in "maintenance mode".
1248 * This is primarily because the device may not have BARs to access
1250 if (state
->ts_operational_mode
== TAVOR_MAINTENANCE_MODE
) {
1254 /* Copy in the tavor_reg_ioctl_t structure */
1255 status
= ddi_copyin((void *)arg
, &wrreg
, sizeof (tavor_reg_ioctl_t
),
1261 /* Determine base address for requested register set */
1262 switch (wrreg
.trg_reg_set
) {
1264 baseaddr
= (uintptr_t)state
->ts_reg_cmd_baseaddr
;
1268 baseaddr
= (uintptr_t)state
->ts_reg_uar_baseaddr
;
1272 baseaddr
= (uintptr_t)state
->ts_reg_ddr_baseaddr
;
1279 /* Ensure that address is properly-aligned */
1280 addr
= (uint32_t *)((baseaddr
+ wrreg
.trg_offset
) & ~0x3);
1282 /* Write the data to the register pointed to by addr */
1283 ddi_put32(state
->ts_reg_cmdhdl
, addr
, wrreg
.trg_data
);
1290 * tavor_flash_reset()
1293 tavor_flash_reset(tavor_state_t
*state
)
1296 * Performs a reset to the flash device. After a reset the flash will
1297 * be operating in normal mode (capable of read/write, etc.).
1299 switch (state
->ts_fw_cmdset
) {
1300 case TAVOR_FLASH_AMD_CMDSET
:
1301 tavor_flash_write(state
, 0x555, TAVOR_HW_FLASH_RESET_AMD
);
1304 case TAVOR_FLASH_INTEL_CMDSET
:
1305 tavor_flash_write(state
, 0x555, TAVOR_HW_FLASH_RESET_INTEL
);
1314 * tavor_flash_read_sector()
1317 tavor_flash_read_sector(tavor_state_t
*state
, uint32_t sector_num
)
1324 image
= (uint32_t *)&state
->ts_fw_sector
[0];
1327 * Calculate the start and end address of the sector, based on the
1328 * sector number passed in.
1330 addr
= sector_num
<< state
->ts_fw_log_sector_sz
;
1331 end_addr
= addr
+ (1 << state
->ts_fw_log_sector_sz
);
1333 /* Set the flash bank correctly for the given address */
1334 tavor_flash_bank(state
, addr
);
1336 /* Read the entire sector, one quadlet at a time */
1337 for (i
= 0; addr
< end_addr
; i
++, addr
+= 4) {
1338 image
[i
] = tavor_flash_read(state
, addr
);
1343 * tavor_flash_read_quadlet()
1346 tavor_flash_read_quadlet(tavor_state_t
*state
, uint32_t *data
,
1349 /* Set the flash bank correctly for the given address */
1350 tavor_flash_bank(state
, addr
);
1352 /* Read one quadlet of data */
1353 *data
= tavor_flash_read(state
, addr
);
1357 * tavor_flash_write_sector()
1360 tavor_flash_write_sector(tavor_state_t
*state
, uint32_t sector_num
)
1368 sector
= (uchar_t
*)&state
->ts_fw_sector
[0];
1371 * Calculate the start and end address of the sector, based on the
1372 * sector number passed in.
1374 addr
= sector_num
<< state
->ts_fw_log_sector_sz
;
1375 end_addr
= addr
+ (1 << state
->ts_fw_log_sector_sz
);
1377 /* Set the flash bank correctly for the given address */
1378 tavor_flash_bank(state
, addr
);
1380 /* Erase the sector before writing */
1381 tavor_flash_reset(state
);
1382 status
= tavor_flash_erase_sector(state
, sector_num
);
1387 /* Write the entire sector, one byte at a time */
1388 for (i
= 0; addr
< end_addr
; i
++, addr
++) {
1389 status
= tavor_flash_write_byte(state
, addr
, sector
[i
]);
1395 tavor_flash_reset(state
);
1400 * tavor_flash_write_byte()
1403 tavor_flash_write_byte(tavor_state_t
*state
, uint32_t addr
, uchar_t data
)
1409 switch (state
->ts_fw_cmdset
) {
1410 case TAVOR_FLASH_AMD_CMDSET
:
1411 /* Issue Flash Byte program command */
1412 tavor_flash_write(state
, addr
, 0xAA);
1413 tavor_flash_write(state
, addr
, 0x55);
1414 tavor_flash_write(state
, addr
, 0xA0);
1415 tavor_flash_write(state
, addr
, data
);
1418 * Wait for Write Byte to Complete:
1420 * 2) Read status of the write operation
1421 * 3) Determine if we have timed out the write operation
1422 * 4) Compare correct data value to the status value that
1423 * was read from the same address.
1428 stat
= tavor_flash_read(state
, addr
& ~3);
1430 if (i
== tavor_hw_flash_timeout_write
) {
1432 "tavor_flash_write_byte: ACS write "
1433 "timeout: addr: 0x%x, data: 0x%x\n",
1440 } while (data
!= ((stat
>> ((3 - (addr
& 3)) << 3)) & 0xFF));
1443 case TAVOR_FLASH_INTEL_CMDSET
:
1444 /* Issue Flash Byte program command */
1445 tavor_flash_write(state
, addr
, TAVOR_HW_FLASH_ICS_WRITE
);
1446 tavor_flash_write(state
, addr
, data
);
1448 /* wait for completion */
1452 stat
= tavor_flash_read(state
, addr
& ~3);
1454 if (i
== tavor_hw_flash_timeout_write
) {
1456 "tavor_flash_write_byte: ICS write "
1457 "timeout: addr: %x, data: %x\n",
1464 } while ((stat
& TAVOR_HW_FLASH_ICS_READY
) == 0);
1466 if (stat
& TAVOR_HW_FLASH_ICS_ERROR
) {
1468 "tavor_flash_write_byte: ICS write cmd error: "
1469 "addr: %x, data: %x\n",
1477 "tavor_flash_write_byte: unknown cmd set: 0x%x\n",
1478 state
->ts_fw_cmdset
);
1487 * tavor_flash_erase_sector()
1490 tavor_flash_erase_sector(tavor_state_t
*state
, uint32_t sector_num
)
1497 /* Get address from sector num */
1498 addr
= sector_num
<< state
->ts_fw_log_sector_sz
;
1500 switch (state
->ts_fw_cmdset
) {
1501 case TAVOR_FLASH_AMD_CMDSET
:
1502 /* Issue Flash Sector Erase Command */
1503 tavor_flash_write(state
, addr
, 0xAA);
1504 tavor_flash_write(state
, addr
, 0x55);
1505 tavor_flash_write(state
, addr
, 0x80);
1506 tavor_flash_write(state
, addr
, 0xAA);
1507 tavor_flash_write(state
, addr
, 0x55);
1508 tavor_flash_write(state
, addr
, 0x30);
1511 * Wait for Sector Erase to Complete
1513 * 2) read the status at the base addr of the sector
1514 * 3) Determine if we have timed out
1515 * 4) Compare status of address with the value of a fully
1516 * erased quadlet. If these are equal, the sector
1523 stat
= tavor_flash_read(state
, addr
);
1525 if (i
== tavor_hw_flash_timeout_erase
) {
1527 "tavor_flash_erase_sector: "
1528 "ACS erase timeout\n");
1534 } while (stat
!= 0xFFFFFFFF);
1537 case TAVOR_FLASH_INTEL_CMDSET
:
1538 /* Issue Erase Command */
1539 tavor_flash_write(state
, addr
, TAVOR_HW_FLASH_ICS_ERASE
);
1540 tavor_flash_write(state
, addr
, TAVOR_HW_FLASH_ICS_CONFIRM
);
1542 /* wait for completion */
1546 stat
= tavor_flash_read(state
, addr
& ~3);
1548 if (i
== tavor_hw_flash_timeout_erase
) {
1550 "tavor_flash_erase_sector: "
1551 "ICS erase timeout\n");
1557 } while ((stat
& TAVOR_HW_FLASH_ICS_READY
) == 0);
1559 if (stat
& TAVOR_HW_FLASH_ICS_ERROR
) {
1561 "tavor_flash_erase_sector: "
1562 "ICS erase cmd error\n");
1569 "tavor_flash_erase_sector: unknown cmd set: 0x%x\n",
1570 state
->ts_fw_cmdset
);
1575 tavor_flash_reset(state
);
1581 * tavor_flash_erase_chip()
1584 tavor_flash_erase_chip(tavor_state_t
*state
)
1592 switch (state
->ts_fw_cmdset
) {
1593 case TAVOR_FLASH_AMD_CMDSET
:
1594 /* Issue Flash Chip Erase Command */
1595 tavor_flash_write(state
, 0, 0xAA);
1596 tavor_flash_write(state
, 0, 0x55);
1597 tavor_flash_write(state
, 0, 0x80);
1598 tavor_flash_write(state
, 0, 0xAA);
1599 tavor_flash_write(state
, 0, 0x55);
1600 tavor_flash_write(state
, 0, 0x10);
1603 * Wait for Chip Erase to Complete
1605 * 2) read the status at the base addr of the sector
1606 * 3) Determine if we have timed out
1607 * 4) Compare status of address with the value of a
1608 * fully erased quadlet. If these are equal, the
1609 * chip has been erased.
1615 stat
= tavor_flash_read(state
, 0);
1617 if (i
== tavor_hw_flash_timeout_erase
) {
1619 "tavor_flash_erase_chip: erase timeout\n");
1625 } while (stat
!= 0xFFFFFFFF);
1628 case TAVOR_FLASH_INTEL_CMDSET
:
1630 * The Intel chip doesn't have a chip erase command, so erase
1631 * all blocks one at a time.
1633 size
= (0x1 << state
->ts_fw_log_sector_sz
);
1634 num_sect
= state
->ts_fw_device_sz
/ size
;
1636 for (i
= 0; i
< num_sect
; i
++) {
1637 status
= tavor_flash_erase_sector(state
, i
);
1640 "tavor_flash_erase_chip: "
1641 "ICS sector %d erase error\n", i
);
1649 cmn_err(CE_WARN
, "tavor_flash_erase_chip: "
1650 "unknown cmd set: 0x%x\n", state
->ts_fw_cmdset
);
1659 * tavor_flash_bank()
1662 tavor_flash_bank(tavor_state_t
*state
, uint32_t addr
)
1664 ddi_acc_handle_t hdl
;
1668 hdl
= state
->ts_pci_cfghdl
;
1670 /* Determine the bank setting from the address */
1671 bank
= addr
& TAVOR_HW_FLASH_BANK_MASK
;
1673 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(state
->ts_fw_flashbank
))
1676 * If the bank is different from the currently set bank, we need to
1677 * change it. Also, if an 'addr' of 0 is given, this allows the
1678 * capability to force the flash bank to 0. This is useful at init
1679 * time to initially set the bank value
1681 if (state
->ts_fw_flashbank
!= bank
|| addr
== 0) {
1682 /* Set bank using the GPIO settings */
1683 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_GPIO_DATACLEAR
, 0x70);
1684 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_GPIO_DATASET
,
1685 (bank
>> 15) & 0x70);
1687 /* Save the bank state */
1688 state
->ts_fw_flashbank
= bank
;
1693 * tavor_flash_read()
1696 tavor_flash_read(tavor_state_t
*state
, uint32_t addr
)
1698 ddi_acc_handle_t hdl
;
1703 hdl
= state
->ts_pci_cfghdl
;
1706 * The Read operation does the following:
1707 * 1) Write the masked address to the TAVOR_FLASH_ADDR register.
1708 * Only the least significant 19 bits are valid.
1709 * 2) Read back the register until the command has completed.
1710 * 3) Read the data retrieved from the address at the TAVOR_FLASH_DATA
1713 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_ADDR
,
1714 (addr
& TAVOR_HW_FLASH_ADDR_MASK
) | (1 << 29));
1718 data
= tavor_flash_read_cfg(hdl
, TAVOR_HW_FLASH_ADDR
);
1720 } while ((data
& TAVOR_HW_FLASH_CMD_MASK
) &&
1721 (timeout
< tavor_hw_flash_timeout_config
));
1723 if (timeout
== tavor_hw_flash_timeout_config
) {
1724 cmn_err(CE_WARN
, "tavor_flash_read: config command timeout.\n");
1727 data
= tavor_flash_read_cfg(hdl
, TAVOR_HW_FLASH_DATA
);
1733 * tavor_flash_write()
1736 tavor_flash_write(tavor_state_t
*state
, uint32_t addr
, uchar_t data
)
1738 ddi_acc_handle_t hdl
;
1743 hdl
= state
->ts_pci_cfghdl
;
1746 * The Write operation does the following:
1747 * 1) Write the data to be written to the TAVOR_FLASH_DATA offset.
1748 * 2) Write the address to write the data to to the TAVOR_FLASH_ADDR
1750 * 3) Wait until the write completes.
1752 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_DATA
, data
<< 24);
1753 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_ADDR
,
1754 (addr
& 0x7FFFF) | (2 << 29));
1758 cmd
= tavor_flash_read_cfg(hdl
, TAVOR_HW_FLASH_ADDR
);
1760 } while ((cmd
& TAVOR_HW_FLASH_CMD_MASK
) &&
1761 (timeout
< tavor_hw_flash_timeout_config
));
1763 if (timeout
== tavor_hw_flash_timeout_config
) {
1764 cmn_err(CE_WARN
, "tavor_flash_write: config cmd timeout.\n");
1769 * tavor_flash_init()
1772 tavor_flash_init(tavor_state_t
*state
)
1775 ddi_acc_handle_t hdl
;
1780 hdl
= state
->ts_pci_cfghdl
;
1782 /* Init the flash */
1785 * Grab the GPIO semaphore. This allows us exclusive access to the
1786 * GPIO settings on the Tavor for the duration of the flash burning
1791 word
= tavor_flash_read_cfg(hdl
, TAVOR_HW_FLASH_GPIO_SEMA
);
1798 } while (sema_cnt
< tavor_hw_flash_timeout_gpio_sema
);
1801 * Determine if we timed out trying to grab the GPIO semaphore
1803 if (sema_cnt
== tavor_hw_flash_timeout_gpio_sema
) {
1804 cmn_err(CE_WARN
, "tavor_flash_init: GPIO SEMA timeout\n");
1807 /* Save away original GPIO Values */
1808 state
->ts_fw_gpio
[0] = tavor_flash_read_cfg(hdl
,
1809 TAVOR_HW_FLASH_GPIO_DIR
);
1810 state
->ts_fw_gpio
[1] = tavor_flash_read_cfg(hdl
,
1811 TAVOR_HW_FLASH_GPIO_POL
);
1812 state
->ts_fw_gpio
[2] = tavor_flash_read_cfg(hdl
,
1813 TAVOR_HW_FLASH_GPIO_MOD
);
1814 state
->ts_fw_gpio
[3] = tavor_flash_read_cfg(hdl
,
1815 TAVOR_HW_FLASH_GPIO_DAT
);
1817 /* Set New GPIO Values */
1818 gpio
= state
->ts_fw_gpio
[0] | 0x70;
1819 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_GPIO_DIR
, gpio
);
1821 gpio
= state
->ts_fw_gpio
[1] & ~0x70;
1822 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_GPIO_POL
, gpio
);
1824 gpio
= state
->ts_fw_gpio
[2] & ~0x70;
1825 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_GPIO_MOD
, gpio
);
1827 /* Set CPUMODE to enable tavor to access the flash device */
1828 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_CPUMODE
,
1829 1 << TAVOR_HW_FLASH_CPU_SHIFT
);
1831 /* Initialize to bank 0 */
1832 tavor_flash_bank(state
, 0);
1836 * tavor_flash_cfi_init
1837 * Implements access to the CFI (Common Flash Interface) data
1840 tavor_flash_cfi_init(tavor_state_t
*state
, uint32_t *cfi_info
, int *intel_xcmd
)
1843 uint32_t sector_sz_bytes
;
1845 uint8_t cfi_ch_info
[TAVOR_CFI_INFO_SIZE
];
1849 * Determine if the user command supports the Intel Extended
1850 * Command Set. The query string is contained in the fourth
1853 tavor_flash_cfi_byte(cfi_ch_info
, cfi_info
[0x04], 0x10);
1854 if (cfi_ch_info
[0x10] == 'M' &&
1855 cfi_ch_info
[0x11] == 'X' &&
1856 cfi_ch_info
[0x12] == '2') {
1857 *intel_xcmd
= 1; /* support is there */
1861 tavor_flash_write(state
, 0x55, TAVOR_FLASH_CFI_INIT
);
1863 /* Read in CFI data */
1864 for (i
= 0; i
< TAVOR_CFI_INFO_SIZE
; i
+= 4) {
1865 data
= tavor_flash_read(state
, i
);
1866 tavor_flash_cfi_byte(cfi_ch_info
, data
, i
);
1869 /* Determine chip set */
1870 state
->ts_fw_cmdset
= TAVOR_FLASH_UNKNOWN_CMDSET
;
1871 if (cfi_ch_info
[0x20] == 'Q' &&
1872 cfi_ch_info
[0x22] == 'R' &&
1873 cfi_ch_info
[0x24] == 'Y') {
1875 * Mode: x16 working in x8 mode (Intel).
1876 * Pack data - skip spacing bytes.
1878 for (i
= 0; i
< TAVOR_CFI_INFO_SIZE
; i
+= 2) {
1879 cfi_ch_info
[i
/2] = cfi_ch_info
[i
];
1882 state
->ts_fw_cmdset
= cfi_ch_info
[0x13];
1883 if (state
->ts_fw_cmdset
!= TAVOR_FLASH_INTEL_CMDSET
&&
1884 state
->ts_fw_cmdset
!= TAVOR_FLASH_AMD_CMDSET
) {
1886 "tavor_flash_cfi_init: UNKNOWN chip cmd set\n");
1887 state
->ts_fw_cmdset
= TAVOR_FLASH_UNKNOWN_CMDSET
;
1891 /* Determine total bytes in one sector size */
1892 sector_sz_bytes
= ((cfi_ch_info
[0x30] << 8) | cfi_ch_info
[0x2F]) << 8;
1894 /* Calculate equivalent of log2 (n) */
1895 for (bit_count
= 0; sector_sz_bytes
> 1; bit_count
++) {
1896 sector_sz_bytes
>>= 1;
1899 /* Set sector size */
1900 state
->ts_fw_log_sector_sz
= bit_count
;
1902 /* Set flash size */
1903 state
->ts_fw_device_sz
= 0x1 << cfi_ch_info
[0x27];
1905 /* Reset to turn off CFI mode */
1906 tavor_flash_reset(state
);
1909 * Pass CFI data back to user command.
1911 for (i
= 0; i
< TAVOR_FLASH_CFI_SIZE_QUADLET
; i
++) {
1912 tavor_flash_cfi_dword(&cfi_info
[i
], cfi_ch_info
, i
<< 2);
1915 if (*intel_xcmd
== 1) {
1917 * Inform the user cmd that this driver does support the
1918 * Intel Extended Command Set.
1920 cfi_ch_info
[0x10] = 'M';
1921 cfi_ch_info
[0x11] = 'X';
1922 cfi_ch_info
[0x12] = '2';
1924 cfi_ch_info
[0x10] = 'Q';
1925 cfi_ch_info
[0x11] = 'R';
1926 cfi_ch_info
[0x12] = 'Y';
1928 cfi_ch_info
[0x13] = state
->ts_fw_cmdset
;
1929 tavor_flash_cfi_dword(&cfi_info
[0x4], cfi_ch_info
, 0x10);
1933 * tavor_flash_fini()
1936 tavor_flash_fini(tavor_state_t
*state
)
1938 ddi_acc_handle_t hdl
;
1941 hdl
= state
->ts_pci_cfghdl
;
1943 /* Restore original GPIO Values */
1944 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_GPIO_DIR
,
1945 state
->ts_fw_gpio
[0]);
1946 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_GPIO_POL
,
1947 state
->ts_fw_gpio
[1]);
1948 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_GPIO_MOD
,
1949 state
->ts_fw_gpio
[2]);
1950 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_GPIO_DAT
,
1951 state
->ts_fw_gpio
[3]);
1953 /* Give up semaphore */
1954 tavor_flash_write_cfg(hdl
, TAVOR_HW_FLASH_GPIO_SEMA
, 0);
1958 * tavor_flash_read_cfg
1961 tavor_flash_read_cfg(ddi_acc_handle_t pci_config_hdl
, uint32_t addr
)
1966 * Perform flash read operation:
1967 * 1) Place addr to read from on the TAVOR_HW_FLASH_CFG_ADDR register
1968 * 2) Read data at that addr from the TAVOR_HW_FLASH_CFG_DATA register
1970 pci_config_put32(pci_config_hdl
, TAVOR_HW_FLASH_CFG_ADDR
, addr
);
1971 read
= pci_config_get32(pci_config_hdl
, TAVOR_HW_FLASH_CFG_DATA
);
1977 * tavor_flash_write_cfg
1980 tavor_flash_write_cfg(ddi_acc_handle_t pci_config_hdl
, uint32_t addr
,
1984 * Perform flash write operation:
1985 * 1) Place addr to write to on the TAVOR_HW_FLASH_CFG_ADDR register
1986 * 2) Place data to write on to the TAVOR_HW_FLASH_CFG_DATA register
1988 pci_config_put32(pci_config_hdl
, TAVOR_HW_FLASH_CFG_ADDR
, addr
);
1989 pci_config_put32(pci_config_hdl
, TAVOR_HW_FLASH_CFG_DATA
, data
);
/*
 * Support routine to convert Common Flash Interface (CFI) data from a
 * 32-bit big-endian word into four bytes of a char array, starting at
 * index 'i'.
 */
static void
tavor_flash_cfi_byte(uint8_t *ch, uint32_t dword, int i)
{
	ch[i] = (uint8_t)((dword & 0xFF000000) >> 24);
	ch[i+1] = (uint8_t)((dword & 0x00FF0000) >> 16);
	ch[i+2] = (uint8_t)((dword & 0x0000FF00) >> 8);
	ch[i+3] = (uint8_t)((dword & 0x000000FF));
}
/*
 * Support routine to convert four bytes of a char array (starting at
 * index 'i') back into a 32-bit big-endian CFI word.  Inverse of
 * tavor_flash_cfi_byte().
 */
static void
tavor_flash_cfi_dword(uint32_t *dword, uint8_t *ch, int i)
{
	*dword = (uint32_t)
	    ((uint32_t)ch[i] << 24 |
	    (uint32_t)ch[i+1] << 16 |
	    (uint32_t)ch[i+2] << 8 |
	    (uint32_t)ch[i+3]);
}
2017 * tavor_loopback_free_qps
2020 tavor_loopback_free_qps(tavor_loopback_state_t
*lstate
)
2024 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*lstate
))
2026 if (lstate
->tls_tx
.tlc_qp_hdl
!= NULL
) {
2027 (void) tavor_qp_free(lstate
->tls_state
,
2028 &lstate
->tls_tx
.tlc_qp_hdl
, IBC_FREE_QP_AND_QPN
, NULL
,
2031 if (lstate
->tls_rx
.tlc_qp_hdl
!= NULL
) {
2032 (void) tavor_qp_free(lstate
->tls_state
,
2033 &lstate
->tls_rx
.tlc_qp_hdl
, IBC_FREE_QP_AND_QPN
, NULL
,
2036 lstate
->tls_tx
.tlc_qp_hdl
= NULL
;
2037 lstate
->tls_rx
.tlc_qp_hdl
= NULL
;
2038 for (i
= 0; i
< 2; i
++) {
2039 if (lstate
->tls_tx
.tlc_cqhdl
[i
] != NULL
) {
2040 (void) tavor_cq_free(lstate
->tls_state
,
2041 &lstate
->tls_tx
.tlc_cqhdl
[i
], TAVOR_NOSLEEP
);
2043 if (lstate
->tls_rx
.tlc_cqhdl
[i
] != NULL
) {
2044 (void) tavor_cq_free(lstate
->tls_state
,
2045 &lstate
->tls_rx
.tlc_cqhdl
[i
], TAVOR_NOSLEEP
);
2047 lstate
->tls_tx
.tlc_cqhdl
[i
] = NULL
;
2048 lstate
->tls_rx
.tlc_cqhdl
[i
] = NULL
;
2053 * tavor_loopback_free_state
2056 tavor_loopback_free_state(tavor_loopback_state_t
*lstate
)
2058 tavor_loopback_free_qps(lstate
);
2059 if (lstate
->tls_tx
.tlc_mrhdl
!= NULL
) {
2060 (void) tavor_mr_deregister(lstate
->tls_state
,
2061 &lstate
->tls_tx
.tlc_mrhdl
, TAVOR_MR_DEREG_ALL
,
2064 if (lstate
->tls_rx
.tlc_mrhdl
!= NULL
) {
2065 (void) tavor_mr_deregister(lstate
->tls_state
,
2066 &lstate
->tls_rx
.tlc_mrhdl
, TAVOR_MR_DEREG_ALL
,
2069 if (lstate
->tls_pd_hdl
!= NULL
) {
2070 (void) tavor_pd_free(lstate
->tls_state
, &lstate
->tls_pd_hdl
);
2072 if (lstate
->tls_tx
.tlc_buf
!= NULL
) {
2073 kmem_free(lstate
->tls_tx
.tlc_buf
, lstate
->tls_tx
.tlc_buf_sz
);
2075 if (lstate
->tls_rx
.tlc_buf
!= NULL
) {
2076 kmem_free(lstate
->tls_rx
.tlc_buf
, lstate
->tls_rx
.tlc_buf_sz
);
2078 bzero(lstate
, sizeof (tavor_loopback_state_t
));
2082 * tavor_loopback_init
2085 tavor_loopback_init(tavor_state_t
*state
, tavor_loopback_state_t
*lstate
)
2087 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*lstate
))
2089 lstate
->tls_hca_hdl
= (ibc_hca_hdl_t
)state
;
2090 lstate
->tls_status
= tavor_pd_alloc(lstate
->tls_state
,
2091 &lstate
->tls_pd_hdl
, TAVOR_NOSLEEP
);
2092 if (lstate
->tls_status
!= IBT_SUCCESS
) {
2093 lstate
->tls_err
= TAVOR_LOOPBACK_PROT_DOMAIN_ALLOC_FAIL
;
2101 * tavor_loopback_init_qp_info
2104 tavor_loopback_init_qp_info(tavor_loopback_state_t
*lstate
,
2105 tavor_loopback_comm_t
*comm
)
2107 bzero(&comm
->tlc_cq_attr
, sizeof (ibt_cq_attr_t
));
2108 bzero(&comm
->tlc_qp_attr
, sizeof (ibt_qp_alloc_attr_t
));
2109 bzero(&comm
->tlc_qp_info
, sizeof (ibt_qp_info_t
));
2112 comm
->tlc_cq_attr
.cq_size
= 128;
2113 comm
->tlc_qp_attr
.qp_sizes
.cs_sq_sgl
= 3;
2114 comm
->tlc_qp_attr
.qp_sizes
.cs_rq_sgl
= 3;
2115 comm
->tlc_qp_attr
.qp_sizes
.cs_sq
= 16;
2116 comm
->tlc_qp_attr
.qp_sizes
.cs_rq
= 16;
2117 comm
->tlc_qp_attr
.qp_flags
= IBT_WR_SIGNALED
;
2119 comm
->tlc_qp_info
.qp_state
= IBT_STATE_RESET
;
2120 comm
->tlc_qp_info
.qp_trans
= IBT_RC_SRV
;
2121 comm
->tlc_qp_info
.qp_flags
= IBT_CEP_RDMA_RD
| IBT_CEP_RDMA_WR
;
2122 comm
->tlc_qp_info
.qp_transport
.rc
.rc_path
.cep_hca_port_num
=
2124 comm
->tlc_qp_info
.qp_transport
.rc
.rc_path
.cep_pkey_ix
=
2125 lstate
->tls_pkey_ix
;
2126 comm
->tlc_qp_info
.qp_transport
.rc
.rc_path
.cep_timeout
=
2127 lstate
->tls_timeout
;
2128 comm
->tlc_qp_info
.qp_transport
.rc
.rc_path
.cep_adds_vect
.av_srvl
= 0;
2129 comm
->tlc_qp_info
.qp_transport
.rc
.rc_path
.cep_adds_vect
.av_srate
=
2131 comm
->tlc_qp_info
.qp_transport
.rc
.rc_path
.cep_adds_vect
.av_send_grh
= 0;
2132 comm
->tlc_qp_info
.qp_transport
.rc
.rc_path
.cep_adds_vect
.av_dlid
=
2134 comm
->tlc_qp_info
.qp_transport
.rc
.rc_retry_cnt
= lstate
->tls_retry
;
2135 comm
->tlc_qp_info
.qp_transport
.rc
.rc_sq_psn
= 0;
2136 comm
->tlc_qp_info
.qp_transport
.rc
.rc_rq_psn
= 0;
2137 comm
->tlc_qp_info
.qp_transport
.rc
.rc_rdma_ra_in
= 4;
2138 comm
->tlc_qp_info
.qp_transport
.rc
.rc_rdma_ra_out
= 4;
2139 comm
->tlc_qp_info
.qp_transport
.rc
.rc_dst_qpn
= 0;
2140 comm
->tlc_qp_info
.qp_transport
.rc
.rc_min_rnr_nak
= IBT_RNR_NAK_655ms
;
2141 comm
->tlc_qp_info
.qp_transport
.rc
.rc_path_mtu
= IB_MTU_1K
;
2145 * tavor_loopback_alloc_mem
2148 tavor_loopback_alloc_mem(tavor_loopback_state_t
*lstate
,
2149 tavor_loopback_comm_t
*comm
, int sz
)
2151 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*comm
))
2153 /* Allocate buffer of specified size */
2154 comm
->tlc_buf_sz
= sz
;
2155 comm
->tlc_buf
= kmem_zalloc(sz
, KM_NOSLEEP
);
2156 if (comm
->tlc_buf
== NULL
) {
2160 /* Register the buffer as a memory region */
2161 comm
->tlc_memattr
.mr_vaddr
= (uint64_t)(uintptr_t)comm
->tlc_buf
;
2162 comm
->tlc_memattr
.mr_len
= (ib_msglen_t
)sz
;
2163 comm
->tlc_memattr
.mr_as
= NULL
;
2164 comm
->tlc_memattr
.mr_flags
= IBT_MR_NOSLEEP
|
2165 IBT_MR_ENABLE_REMOTE_WRITE
| IBT_MR_ENABLE_LOCAL_WRITE
;
2167 comm
->tlc_status
= tavor_mr_register(lstate
->tls_state
,
2168 lstate
->tls_pd_hdl
, &comm
->tlc_memattr
, &comm
->tlc_mrhdl
, NULL
);
2170 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*comm
->tlc_mrhdl
))
2172 comm
->tlc_mrdesc
.md_vaddr
= comm
->tlc_mrhdl
->mr_bindinfo
.bi_addr
;
2173 comm
->tlc_mrdesc
.md_lkey
= comm
->tlc_mrhdl
->mr_lkey
;
2174 comm
->tlc_mrdesc
.md_rkey
= comm
->tlc_mrhdl
->mr_rkey
;
2175 if (comm
->tlc_status
!= IBT_SUCCESS
) {
2182 * tavor_loopback_alloc_qps
2185 tavor_loopback_alloc_qps(tavor_loopback_state_t
*lstate
,
2186 tavor_loopback_comm_t
*comm
)
2188 uint32_t i
, real_size
;
2189 tavor_qp_info_t qpinfo
;
2191 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*comm
))
2192 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*lstate
))
2194 /* Allocate send and recv CQs */
2195 for (i
= 0; i
< 2; i
++) {
2196 bzero(&comm
->tlc_cq_attr
, sizeof (ibt_cq_attr_t
));
2197 comm
->tlc_cq_attr
.cq_size
= 128;
2198 comm
->tlc_status
= tavor_cq_alloc(lstate
->tls_state
,
2199 (ibt_cq_hdl_t
)NULL
, &comm
->tlc_cq_attr
, &real_size
,
2200 &comm
->tlc_cqhdl
[i
], TAVOR_NOSLEEP
);
2201 if (comm
->tlc_status
!= IBT_SUCCESS
) {
2202 lstate
->tls_err
+= i
;
2207 /* Allocate the QP */
2208 tavor_loopback_init_qp_info(lstate
, comm
);
2209 comm
->tlc_qp_attr
.qp_pd_hdl
= (ibt_pd_hdl_t
)lstate
->tls_pd_hdl
;
2210 comm
->tlc_qp_attr
.qp_scq_hdl
= (ibt_cq_hdl_t
)comm
->tlc_cqhdl
[0];
2211 comm
->tlc_qp_attr
.qp_rcq_hdl
= (ibt_cq_hdl_t
)comm
->tlc_cqhdl
[1];
2212 comm
->tlc_qp_attr
.qp_ibc_scq_hdl
= (ibt_opaque1_t
)comm
->tlc_cqhdl
[0];
2213 comm
->tlc_qp_attr
.qp_ibc_rcq_hdl
= (ibt_opaque1_t
)comm
->tlc_cqhdl
[1];
2214 qpinfo
.qpi_attrp
= &comm
->tlc_qp_attr
;
2215 qpinfo
.qpi_type
= IBT_RC_RQP
;
2216 qpinfo
.qpi_ibt_qphdl
= NULL
;
2217 qpinfo
.qpi_queueszp
= &comm
->tlc_chan_sizes
;
2218 qpinfo
.qpi_qpn
= &comm
->tlc_qp_num
;
2219 comm
->tlc_status
= tavor_qp_alloc(lstate
->tls_state
, &qpinfo
,
2220 TAVOR_NOSLEEP
, NULL
);
2221 if (comm
->tlc_status
== DDI_SUCCESS
) {
2222 comm
->tlc_qp_hdl
= qpinfo
.qpi_qphdl
;
2225 if (comm
->tlc_status
!= IBT_SUCCESS
) {
2226 lstate
->tls_err
+= 2;
2233 * tavor_loopback_modify_qp
2236 tavor_loopback_modify_qp(tavor_loopback_state_t
*lstate
,
2237 tavor_loopback_comm_t
*comm
, uint_t qp_num
)
2239 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*comm
))
2240 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*lstate
))
2242 /* Modify QP to INIT */
2243 tavor_loopback_init_qp_info(lstate
, comm
);
2244 comm
->tlc_qp_info
.qp_state
= IBT_STATE_INIT
;
2245 comm
->tlc_status
= tavor_qp_modify(lstate
->tls_state
, comm
->tlc_qp_hdl
,
2246 IBT_CEP_SET_STATE
, &comm
->tlc_qp_info
, &comm
->tlc_queue_sizes
);
2247 if (comm
->tlc_status
!= IBT_SUCCESS
) {
2252 * Modify QP to RTR (set destination LID and QP number to local
2253 * LID and QP number)
2255 comm
->tlc_qp_info
.qp_state
= IBT_STATE_RTR
;
2256 comm
->tlc_qp_info
.qp_transport
.rc
.rc_path
.cep_adds_vect
.av_dlid
2258 comm
->tlc_qp_info
.qp_transport
.rc
.rc_dst_qpn
= qp_num
;
2259 comm
->tlc_status
= tavor_qp_modify(lstate
->tls_state
, comm
->tlc_qp_hdl
,
2260 IBT_CEP_SET_STATE
, &comm
->tlc_qp_info
, &comm
->tlc_queue_sizes
);
2261 if (comm
->tlc_status
!= IBT_SUCCESS
) {
2262 lstate
->tls_err
+= 1;
2266 /* Modify QP to RTS */
2267 comm
->tlc_qp_info
.qp_current_state
= IBT_STATE_RTR
;
2268 comm
->tlc_qp_info
.qp_state
= IBT_STATE_RTS
;
2269 comm
->tlc_status
= tavor_qp_modify(lstate
->tls_state
, comm
->tlc_qp_hdl
,
2270 IBT_CEP_SET_STATE
, &comm
->tlc_qp_info
, &comm
->tlc_queue_sizes
);
2271 if (comm
->tlc_status
!= IBT_SUCCESS
) {
2272 lstate
->tls_err
+= 2;
2279 * tavor_loopback_copyout
2282 tavor_loopback_copyout(tavor_loopback_ioctl_t
*lb
, intptr_t arg
, int mode
)
2284 #ifdef _MULTI_DATAMODEL
2285 if (ddi_model_convert_from(mode
& FMODELS
) == DDI_MODEL_ILP32
) {
2286 tavor_loopback_ioctl32_t lb32
;
2288 lb32
.tlb_revision
= lb
->tlb_revision
;
2290 (caddr32_t
)(uintptr_t)lb
->tlb_send_buf
;
2292 (caddr32_t
)(uintptr_t)lb
->tlb_fail_buf
;
2293 lb32
.tlb_buf_sz
= lb
->tlb_buf_sz
;
2294 lb32
.tlb_num_iter
= lb
->tlb_num_iter
;
2295 lb32
.tlb_pass_done
= lb
->tlb_pass_done
;
2296 lb32
.tlb_timeout
= lb
->tlb_timeout
;
2297 lb32
.tlb_error_type
= lb
->tlb_error_type
;
2298 lb32
.tlb_port_num
= lb
->tlb_port_num
;
2299 lb32
.tlb_num_retry
= lb
->tlb_num_retry
;
2301 if (ddi_copyout(&lb32
, (void *)arg
,
2302 sizeof (tavor_loopback_ioctl32_t
), mode
) != 0) {
2306 #endif /* _MULTI_DATAMODEL */
2307 if (ddi_copyout(lb
, (void *)arg
, sizeof (tavor_loopback_ioctl_t
),
2315 * tavor_loopback_post_send
2318 tavor_loopback_post_send(tavor_loopback_state_t
*lstate
,
2319 tavor_loopback_comm_t
*tx
, tavor_loopback_comm_t
*rx
)
2323 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*tx
))
2325 bzero(&tx
->tlc_sgl
, sizeof (ibt_wr_ds_t
));
2326 bzero(&tx
->tlc_wr
, sizeof (ibt_send_wr_t
));
2328 /* Initialize local address for TX buffer */
2329 tx
->tlc_sgl
.ds_va
= tx
->tlc_mrdesc
.md_vaddr
;
2330 tx
->tlc_sgl
.ds_key
= tx
->tlc_mrdesc
.md_lkey
;
2331 tx
->tlc_sgl
.ds_len
= tx
->tlc_buf_sz
;
2333 /* Initialize the remaining details of the work request */
2334 tx
->tlc_wr
.wr_id
= tx
->tlc_wrid
++;
2335 tx
->tlc_wr
.wr_flags
= IBT_WR_SEND_SIGNAL
;
2336 tx
->tlc_wr
.wr_nds
= 1;
2337 tx
->tlc_wr
.wr_sgl
= &tx
->tlc_sgl
;
2338 tx
->tlc_wr
.wr_opcode
= IBT_WRC_RDMAW
;
2339 tx
->tlc_wr
.wr_trans
= IBT_RC_SRV
;
2341 /* Initialize the remote address for RX buffer */
2342 tx
->tlc_wr
.wr
.rc
.rcwr
.rdma
.rdma_raddr
= rx
->tlc_mrdesc
.md_vaddr
;
2343 tx
->tlc_wr
.wr
.rc
.rcwr
.rdma
.rdma_rkey
= rx
->tlc_mrdesc
.md_rkey
;
2344 tx
->tlc_complete
= 0;
2345 ret
= tavor_post_send(lstate
->tls_state
, tx
->tlc_qp_hdl
, &tx
->tlc_wr
,
2347 if (ret
!= IBT_SUCCESS
) {
2354 * tavor_loopback_poll_cq
2357 tavor_loopback_poll_cq(tavor_loopback_state_t
*lstate
,
2358 tavor_loopback_comm_t
*comm
)
2360 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*comm
))
2362 comm
->tlc_wc
.wc_status
= 0;
2363 comm
->tlc_num_polled
= 0;
2364 comm
->tlc_status
= tavor_cq_poll(lstate
->tls_state
,
2365 comm
->tlc_cqhdl
[0], &comm
->tlc_wc
, 1, &comm
->tlc_num_polled
);
2366 if ((comm
->tlc_status
== IBT_SUCCESS
) &&
2367 (comm
->tlc_wc
.wc_status
!= IBT_WC_SUCCESS
)) {
2368 comm
->tlc_status
= ibc_get_ci_failure(0);
2370 return (comm
->tlc_status
);